From b733acba6603bfd3a8edaf1f3fa5d80d02a4ce01 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Mon, 8 Jul 2024 17:46:43 -0400
Subject: [PATCH 01/90] Add model configuration for REST
---
samples/rest/model_configuration.sh | 30 +++++++++++++++++++++++++++++
1 file changed, 30 insertions(+)
create mode 100644 samples/rest/model_configuration.sh
diff --git a/samples/rest/model_configuration.sh b/samples/rest/model_configuration.sh
new file mode 100644
index 000000000..ba10ff813
--- /dev/null
+++ b/samples/rest/model_configuration.sh
@@ -0,0 +1,30 @@
+set -eu
+
+echo "[START configure_model]"
+# [START configure_model]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Write a story about a magic backpack."}
+ ]
+ }],
+ "safetySettings": [
+ {
+ "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+ "threshold": "BLOCK_ONLY_HIGH"
+ }
+ ],
+ "generationConfig": {
+ "stopSequences": [
+ "Title"
+ ],
+ "temperature": 1.0,
+ "maxOutputTokens": 800,
+ "topP": 0.8,
+ "topK": 10
+ }
+ }' 2> /dev/null | grep "text"
+# [END configure_model]
From a4e501eafcc2402590cd3efcaa9e5e0bb3292911 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 9 Jul 2024 08:16:46 -0700
Subject: [PATCH 02/90] Sync count tokens examples (#445)
* Sync count tokens examples
Change-Id: Idd9cd0956ad9b2fa7d95d8ab792b1673dc1e88a8
* format
Change-Id: I7e7ecd1b4c0e060ef91bad0f5083616f0394705a
* Fix video file name.
Change-Id: Id1ff1196fa8072cc90d1a3921846687687b180b4
---
samples/count_tokens.py | 220 ++++++++++++++++++++++++++++++++++++----
1 file changed, 199 insertions(+), 21 deletions(-)
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index ca42a1bb6..827fe5f1d 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -21,75 +21,247 @@
class UnitTests(absltest.TestCase):
+ def test_tokens_context_window(self):
+ # [START tokens_context_window]
+ model_info = genai.get_model("models/gemini-1.0-pro-001")
+ # Returns the "context window" for the model (the combined input and output token limits)
+ print(f"{model_info.input_token_limit=}")
+ print(f"{model_info.output_token_limit=}")
+ # [END tokens_context_window]
+
+ # [START tokens_context_window_return]
+ # input_token_limit=30720
+ # output_token_limit=2048
+ # [END tokens_context_window_return]
+
def test_tokens_text_only(self):
# [START tokens_text_only]
model = genai.GenerativeModel("models/gemini-1.5-flash")
- print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
+
+ prompt = "The quick brown fox jumps over the lazy dog."
+
+ # Call `count_tokens` to get the input token count (`total_tokens`).
+ print("total_tokens: ", model.count_tokens(prompt))
+
+ response = model.generate_content(prompt)
+
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
# [END tokens_text_only]
+ # [START tokens_text_only_return]
+ # total_tokens: total_tokens: 10
+ #
+ # prompt_token_count: 11
+ # candidates_token_count: 73
+ # total_token_count: 84
+ # [END tokens_text_only_return]
+
def test_tokens_chat(self):
# [START tokens_chat]
model = genai.GenerativeModel("models/gemini-1.5-flash")
+
chat = model.start_chat(
history=[
- {"role": "user", "parts": "Hi, my name is Bob."},
+ {"role": "user", "parts": "Hi my name is Bob"},
{"role": "model", "parts": "Hi Bob!"},
]
)
- model.count_tokens(chat.history)
+ # Call `count_tokens` to get the input token count (`total_tokens`).
+ print(model.count_tokens(chat.history))
+
+ response = chat.send_message(
+ "In one sentence, explain how a computer works to a young child."
+ )
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
+ # TODO add comment...
from google.generativeai.types.content_types import to_contents
- model.count_tokens(chat.history + to_contents("What is the meaning of life?"))
+ print(model.count_tokens(chat.history + to_contents("What is the meaning of life?")))
# [END tokens_chat]
+ # [START tokens_chat_return]
+ # total_tokens: 10
+ #
+ # prompt_token_count: 25
+ # candidates_token_count: 21
+ # total_token_count: 46
+ #
+ # total_tokens: 56
+ # [END tokens_chat_return]
+
def test_tokens_multimodal_image_inline(self):
# [START tokens_multimodal_image_inline]
+ import PIL.Image
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- import PIL
- organ = PIL.Image.open(media / "organ.jpg")
- print(model.count_tokens(["Tell me about this instrument", organ]))
+ prompt = "Tell me about this image"
+ your_image_file = PIL.Image.open("image.jpg")
+
+ # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
+ # An image's display size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the prompt and file separately.
+ print(model.count_tokens([prompt, your_image_file]))
+
+ response = model.generate_content([prompt, your_image_file])
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
# [END tokens_multimodal_image_inline]
+ # [START tokens_multimodal_image_inline_return]
+ # total_tokens: 263
+ #
+ # prompt_token_count: 264
+ # candidates_token_count: 81
+ # total_token_count: 345
+ # [END tokens_multimodal_image_inline_return]
+
def test_tokens_multimodal_image_file_api(self):
# [START tokens_multimodal_image_file_api]
model = genai.GenerativeModel("models/gemini-1.5-flash")
- organ_upload = genai.upload_file(media / "organ.jpg")
- print(model.count_tokens(["Tell me about this instrument", organ_upload]))
+
+ prompt = "Tell me about this image"
+ your_image_file = genai.upload_file(path="image.jpg")
+
+ # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
+ # An image's display size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the prompt and file separately.
+ print(model.count_tokens([prompt, your_image_file]))
+
+ response = model.generate_content([prompt, your_image_file])
+ response.text
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
# [END tokens_multimodal_image_file_api]
+ # [START tokens_multimodal_image_file_api_return]
+ # total_tokens: 263
+ #
+ # prompt_token_count: 264
+ # candidates_token_count: 80
+ # total_token_count: 344
+ # [END tokens_multimodal_image_file_api_return]
+
def test_tokens_multimodal_video_audio_file_api(self):
# [START tokens_multimodal_video_audio_file_api]
+ import time
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- audio_upload = genai.upload_file(media / "sample.mp3")
- print(model.count_tokens(audio_upload))
+
+ prompt = "Tell me about this video"
+ your_file = genai.upload_file(path=media / "Big_Buck_Bunny.mp4")
+
+ # Videos need to be processed before you can use them.
+ while your_file.state.name == "PROCESSING":
+ print("processing video...")
+ time.sleep(5)
+ your_file = genai.get_file(your_file.name)
+
+ # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
+ # A video or audio file is converted to tokens at a fixed rate of tokens per second.
+ # Optionally, you can call `count_tokens` for the prompt and file separately.
+ print(model.count_tokens([prompt, your_file]))
+
+ response = model.generate_content([prompt, your_file])
+
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
+
# [END tokens_multimodal_video_audio_file_api]
+ # [START tokens_multimodal_video_audio_file_api_return]
+ # processing video...
+ # total_tokens: 300
+ #
+ # prompt_token_count: 301
+ # candidates_token_count: 60
+ # total_token_count: 361
+ # [END tokens_multimodal_video_audio_file_api_return]
+
def test_tokens_cached_content(self):
# [START tokens_cached_content]
- document = genai.upload_file(path=media / "a11.txt")
- model_name = "gemini-1.5-flash-001"
+ import time
+
+ model = genai.GenerativeModel("models/gemini-1.5-flash")
+
+ your_file = genai.upload_file(path=media / "a11.txt")
+
cache = genai.caching.CachedContent.create(
- model=model_name,
- contents=[document],
+ model="models/gemini-1.5-flash-001",
+ # You could set the system_instruction and tools
+ system_instruction=None,
+ tools=None,
+ contents=["Here the Apollo 11 transcript:", your_file],
)
- print(genai.GenerativeModel().count_tokens(cache))
+
+ model = genai.GenerativeModel.from_cached_content(cache)
+
+ # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
+ # A video or audio file is converted to tokens at a fixed rate of tokens per second.
+ # Optionally, you can call `count_tokens` for the prompt and file separately.
+ prompt = "Please give a short summary of this file."
+ print(model.count_tokens(prompt))
+
+ response = model.generate_content(prompt)
+ # Use `usage_metadata` to get both input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).
+ print(response.usage_metadata)
+
+ cache.delete()
# [END tokens_cached_content]
- cache.delete() # Clear
+
+ # [START tokens_cached_content_return]
+ # total_tokens: 9
+ #
+ # prompt_token_count: 323393
+ # cached_content_token_count: 323383
+ # candidates_token_count: 64
+ # total_token_count: 323457
+ # [END tokens_cached_content_return]
def test_tokens_system_instruction(self):
# [START tokens_system_instruction]
- document = genai.upload_file(path=media / "a11.txt")
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+
+ # The total token count includes everything sent to the generate_content request.
+ print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
+ # total_tokens: 10
+
model = genai.GenerativeModel(
- "models/gemini-1.5-flash-001",
- system_instruction="You are an expert analyzing transcripts. Give a summary of this document.",
+ model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko."
)
- print(model.count_tokens(document))
+
+ # The total token count includes everything sent to the generate_content request.
+ # When you use system instructions, the total token count increases.
+ print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
# [END tokens_system_instruction]
+ # [START tokens_system_instruction_return]
+ # total_tokens: 10
+ #
+ # total_tokens: 21
+ # [END tokens_system_instruction_return]
+
def test_tokens_tools(self):
# [START tokens_tools]
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+
+ # The total token count includes everything sent to the generate_content request.
+ print(
+ model.count_tokens(
+ "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+ )
+ )
+ # total_tokens: 10
+
def add(a: float, b: float):
"""returns a + b."""
return a + b
@@ -117,6 +289,12 @@ def divide(a: float, b: float):
)
# [END tokens_tools]
+ # [START tokens_tools_return]
+ # total_tokens: 22
+ #
+ # total_tokens: 206
+ # [END tokens_tools_return]
+
if __name__ == "__main__":
absltest.main()
From cc2a3b720bdc97676fe4db762cd81eab2aeed743 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 9 Jul 2024 14:42:38 -0700
Subject: [PATCH 03/90] Fix PIL.Image imports. (#447)
---
samples/text_generation.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/samples/text_generation.py b/samples/text_generation.py
index a5e800c75..c4d6adccb 100644
--- a/samples/text_generation.py
+++ b/samples/text_generation.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import PIL.Image
from absl.testing import absltest
import google.generativeai as genai
@@ -40,7 +39,7 @@ def test_text_gen_text_only_prompt_streaming(self):
def test_text_gen_multimodal_one_image_prompt(self):
# [START text_gen_multimodal_one_image_prompt]
- import PIL
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -50,7 +49,7 @@ def test_text_gen_multimodal_one_image_prompt(self):
def test_text_gen_multimodal_one_image_prompt_streaming(self):
# [START text_gen_multimodal_one_image_prompt_streaming]
- import PIL
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -62,7 +61,7 @@ def test_text_gen_multimodal_one_image_prompt_streaming(self):
def test_text_gen_multimodal_multi_image_prompt(self):
# [START text_gen_multimodal_multi_image_prompt]
- import PIL
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -75,7 +74,7 @@ def test_text_gen_multimodal_multi_image_prompt(self):
def test_text_gen_multimodal_multi_image_prompt_streaming(self):
# [START text_gen_multimodal_multi_image_prompt_streaming]
- import PIL
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
From 0d51b2619889cfb2dad08b90d57c89c3f0a8244f Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Wed, 10 Jul 2024 13:34:01 -0700
Subject: [PATCH 04/90] Add code execution python sample (#451)
* Add code execution python sample
* Sync with docs.
* format
Change-Id: Id75a4c1936a13a63f1e22f36d5b7011c24e31233
---------
Co-authored-by: Mark Daoust
---
samples/code_execution.py | 166 ++++++++++++++++++++++++++++++++++++++
1 file changed, 166 insertions(+)
create mode 100644 samples/code_execution.py
diff --git a/samples/code_execution.py b/samples/code_execution.py
new file mode 100644
index 000000000..cd82d676d
--- /dev/null
+++ b/samples/code_execution.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from absl.testing import absltest
+
+import google.generativeai as genai
+
+
+class UnitTests(absltest.TestCase):
+ def test_code_execution_basic(self):
+ # [START code_execution_basic]
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution")
+ response = model.generate_content(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ )
+ )
+ print(response.text)
+ # [END code_execution_basic]
+ # [START code_execution_basic_return]
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # primes = []
+ # num = 2
+ # count = 0
+ # while count < 50:
+ # if is_prime(num):
+ # primes.append(num)
+ # count += 1
+ # num += 1
+ #
+ # print(f'The first 50 prime numbers are: {primes}')
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
+ #
+ # The sum of the first 50 prime numbers is **5117**.
+ # [END code_execution_basic_return]
+
+ def test_code_execution_request_override(self):
+ # [START code_execution_request_override]
+ model = genai.GenerativeModel(model_name="gemini-1.5-pro")
+ response = model.generate_content(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ ),
+ tools="code_execution",
+ )
+ print(response.text)
+ # [END code_execution_request_override]
+ # [START code_execution_request_override_return]
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # primes = []
+ # num = 2
+ # count = 0
+ # while count < 50:
+ # if is_prime(num):
+ # primes.append(num)
+ # count += 1
+ # num += 1
+ #
+ # print(f'The first 50 prime numbers are: {primes}')
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
+ #
+ # The sum of the first 50 prime numbers is **5117**.
+ # [END code_execution_request_override_return]
+
+ def test_code_execution_chat(self):
+ # [START code_execution_chat]
+ model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution")
+ chat = model.start_chat()
+ response = chat.send_message(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ )
+ )
+ print(response.text)
+ # [END code_execution_chat]
+ # [START code_execution_chat_return]
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # primes = []
+ # num = 2
+ # count = 0
+ # while count < 50:
+ # if is_prime(num):
+ # primes.append(num)
+ # count += 1
+ # num += 1
+ #
+ # print(f'The first 50 prime numbers are: {primes}')
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
+ #
+ # The sum of the first 50 prime numbers is **5117**.
+ # [END code_execution_chat_return]
+
+
+if __name__ == "__main__":
+ absltest.main()
From 712e0e4b167fe94c5fa6dc1cad2bb4975ce766d4 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Wed, 10 Jul 2024 14:48:40 -0700
Subject: [PATCH 05/90] Update example to show part types. (#452)
---
samples/code_execution.py | 51 +++++++++++++++++++++++++++++----------
1 file changed, 38 insertions(+), 13 deletions(-)
diff --git a/samples/code_execution.py b/samples/code_execution.py
index cd82d676d..17f8142a0 100644
--- a/samples/code_execution.py
+++ b/samples/code_execution.py
@@ -27,9 +27,38 @@ def test_code_execution_basic(self):
"Generate and run code for the calculation, and make sure you get all 50."
)
)
- print(response.text)
+
+ # Each `part` either contains `text`, `executable_code` or an `execution_result`
+ for part in result.candidates[0].content.parts:
+ print(part, '\n')
+
+ print('-'*80)
+ # The `.text` accessor joins the parts into a markdown compatible text representation.
+ print('\n\n', response.text)
# [END code_execution_basic]
+
# [START code_execution_basic_return]
+ # text: "I can help with that! To calculate the sum of the first 50 prime numbers, we\'ll need to first identify all the prime numbers up to the 50th prime number. \n\nHere is the code to find and sum the first 50 prime numbers:\n\n"
+ #
+ # executable_code {
+ # language: PYTHON
+ # code: "\ndef is_prime(n):\n \"\"\"\n Checks if a number is prime.\n \"\"\"\n if n <= 1:\n return False\n for i in range(2, int(n**0.5) + 1):\n if n % i == 0:\n return False\n return True\n\nprime_count = 0\nnumber = 2\nprimes = []\nwhile prime_count < 50:\n if is_prime(number):\n primes.append(number)\n prime_count += 1\n number += 1\n\nprint(f\'The sum of the first 50 prime numbers is: {sum(primes)}\')\n"
+ # }
+ #
+ # code_execution_result {
+ # outcome: OUTCOME_OK
+ # output: "The sum of the first 50 prime numbers is: 5117\n"
+ # }
+ #
+ # text: "I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. \n"
+ #
+ #
+ # --------------------------------------------------------------------------------
+ # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number.
+ #
+ # Here is the code to find and sum the first 50 prime numbers:
+ #
+ #
# ``` python
# def is_prime(n):
# """
@@ -42,27 +71,23 @@ def test_code_execution_basic(self):
# return False
# return True
#
+ # prime_count = 0
+ # number = 2
# primes = []
- # num = 2
- # count = 0
- # while count < 50:
- # if is_prime(num):
- # primes.append(num)
- # count += 1
- # num += 1
+ # while prime_count < 50:
+ # if is_prime(number):
+ # primes.append(number)
+ # prime_count += 1
+ # number += 1
#
- # print(f'The first 50 prime numbers are: {primes}')
# print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
#
# ```
# ```
- # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
# The sum of the first 50 prime numbers is: 5117
#
# ```
- # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
- #
- # The sum of the first 50 prime numbers is **5117**.
+ # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117.
# [END code_execution_basic_return]
def test_code_execution_request_override(self):
From 351daadb83455eef375e86a4ec742c948a194dae Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Thu, 11 Jul 2024 09:27:48 -0700
Subject: [PATCH 06/90] move model_configuration samples (#454)
Change-Id: I4c0f02a52e9c63d5ee72874f24904c1c931cb4cb
---
samples/{model_configuration.py => configure_model_parameters.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename samples/{model_configuration.py => configure_model_parameters.py} (100%)
diff --git a/samples/model_configuration.py b/samples/configure_model_parameters.py
similarity index 100%
rename from samples/model_configuration.py
rename to samples/configure_model_parameters.py
From d74189f4ce04757662586895dedca775e7aefecb Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Thu, 11 Jul 2024 09:36:30 -0700
Subject: [PATCH 07/90] Update configure_model_parameters.py
---
samples/configure_model_parameters.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/samples/configure_model_parameters.py b/samples/configure_model_parameters.py
index 54aec9763..ecd39d312 100644
--- a/samples/configure_model_parameters.py
+++ b/samples/configure_model_parameters.py
@@ -19,7 +19,7 @@
class UnitTests(absltest.TestCase):
def test_configure_model(self):
- # [START configure_model]
+ # [START configure_model_parameters]
model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
"Tell me a story about a magic backpack.",
@@ -33,7 +33,7 @@ def test_configure_model(self):
)
print(response.text)
- # [END configure_model]
+ # [END configure_model_parameters]
if __name__ == "__main__":
From 8642c8c31009e80543803ff2142fb31c9f1ed838 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Thu, 11 Jul 2024 10:03:11 -0700
Subject: [PATCH 08/90] move model_configuration samples (#456)
Change-Id: Ic60ecebc3cc9a2f4455054dcdb769e338331e8e1
---
...model_configuration.sh => configure_model_parameters.sh} | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
rename samples/rest/{model_configuration.sh => configure_model_parameters.sh} (87%)
diff --git a/samples/rest/model_configuration.sh b/samples/rest/configure_model_parameters.sh
similarity index 87%
rename from samples/rest/model_configuration.sh
rename to samples/rest/configure_model_parameters.sh
index ba10ff813..bd8d9d4c6 100644
--- a/samples/rest/model_configuration.sh
+++ b/samples/rest/configure_model_parameters.sh
@@ -1,7 +1,7 @@
set -eu
-echo "[START configure_model]"
-# [START configure_model]
+echo "[START configure_model_parameters]"
+# [START configure_model_parameters]
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \
-H 'Content-Type: application/json' \
-X POST \
@@ -27,4 +27,4 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:ge
"topK": 10
}
}' 2> /dev/null | grep "text"
-# [END configure_model]
+# [END configure_model_parameters]
From 7c2148642e3c50f642b2041073172f11b221f532 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Thu, 11 Jul 2024 11:08:28 -0700
Subject: [PATCH 09/90] Format code execution (#457)
---
samples/code_execution.py | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/samples/code_execution.py b/samples/code_execution.py
index 17f8142a0..6b5c97dc4 100644
--- a/samples/code_execution.py
+++ b/samples/code_execution.py
@@ -27,14 +27,14 @@ def test_code_execution_basic(self):
"Generate and run code for the calculation, and make sure you get all 50."
)
)
-
+
# Each `part` either contains `text`, `executable_code` or an `execution_result`
for part in result.candidates[0].content.parts:
- print(part, '\n')
+ print(part, "\n")
- print('-'*80)
- # The `.text` accessor joins the parts into a markdown compatible text representation.
- print('\n\n', response.text)
+ print("-" * 80)
+ # The `.text` accessor joins the parts into a markdown compatible text representation.
+ print("\n\n", response.text)
# [END code_execution_basic]
# [START code_execution_basic_return]
@@ -54,7 +54,7 @@ def test_code_execution_basic(self):
#
#
# --------------------------------------------------------------------------------
- # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number.
+ # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number.
#
# Here is the code to find and sum the first 50 prime numbers:
#
@@ -87,7 +87,7 @@ def test_code_execution_basic(self):
# The sum of the first 50 prime numbers is: 5117
#
# ```
- # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117.
+ # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117.
# [END code_execution_basic_return]
def test_code_execution_request_override(self):
From 950a666abd4f19c53b9a71a06a16b3af026fdc75 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Thu, 11 Jul 2024 11:20:08 -0700
Subject: [PATCH 10/90] Chat REST samples (#449)
* Add first chat samples for REST
* Add chat REST examples
* last message should be 'role:user'
Change-Id: I3e06e9e0ffb553cfc70add5ed0365cb56e9fddff
---------
Co-authored-by: Mark Daoust
---
samples/rest/chat.sh | 93 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 93 insertions(+)
create mode 100644 samples/rest/chat.sh
diff --git a/samples/rest/chat.sh b/samples/rest/chat.sh
new file mode 100644
index 000000000..d5af4cfb5
--- /dev/null
+++ b/samples/rest/chat.sh
@@ -0,0 +1,93 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+echo "[START chat]"
+# [START chat]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role":"user",
+ "parts":[{
+ "text": "Hello"}]},
+ {"role": "model",
+ "parts":[{
+ "text": "Great to meet you. What would you like to know?"}]},
+ {"role":"user",
+ "parts":[{
+ "text": "I have two dogs in my house. How many paws are in my house?"}]},
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat]
+
+echo "[START chat_streaming]"
+# [START chat_streaming]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role":"user",
+ "parts":[{
+ "text": "Hello"}]},
+ {"role": "model",
+ "parts":[{
+ "text": "Great to meet you. What would you like to know?"}]},
+ {"role":"user",
+ "parts":[{
+ "text": "I have two dogs in my house. How many paws are in my house?"}]},
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat_streaming]
+
+echo "[START chat_streaming_with_images]"
+# [START chat_streaming_with_images]
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {
+ "role": "user",
+ "parts": [
+ {
+ "text": "Hello, I am interested in learning about musical instruments. Can I show you one?"
+ }
+ ]
+ },
+ {
+ "role": "model",
+ "parts": [
+ {
+ "text": "Certainly."
+ },
+ ]
+ },
+ {
+ "role": "user",
+ "parts": [
+ {
+ "text": "Tell me about this instrument"
+ },
+ {
+ "inline_data": {
+ "mime_type": "image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat_streaming_with_images]
\ No newline at end of file
From 754d038d14cd1587c8143e41dfdde9741f0e7eba Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Thu, 11 Jul 2024 11:38:09 -0700
Subject: [PATCH 11/90] Update tuned_models.py (#458)
---
samples/tuned_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/samples/tuned_models.py b/samples/tuned_models.py
index 29246347d..d328d8c30 100644
--- a/samples/tuned_models.py
+++ b/samples/tuned_models.py
@@ -77,7 +77,7 @@ def test_tuned_models_generate_content(self):
model = genai.GenerativeModel(model_name="tunedModels/my-increment-model")
result = model.generate_content("III")
print(result.text) # "IV"
- # [END tuned_models_create]
+ # [END tuned_models_generate_content]
def test_tuned_models_get(self):
# [START tuned_models_get]
From 4e1dcd6f45f90937ef921be1e2cf1b316f4f0bf7 Mon Sep 17 00:00:00 2001
From: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com>
Date: Fri, 12 Jul 2024 16:44:11 +0200
Subject: [PATCH 12/90] Update count_tokens.py (#459)
- integrated returns into main snippet
- updated code comments
- pulled text of prompts out of the requests to generate_content
---
samples/count_tokens.py | 188 ++++++++++++++++------------------------
1 file changed, 77 insertions(+), 111 deletions(-)
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index 827fe5f1d..a45457e38 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -24,16 +24,14 @@ class UnitTests(absltest.TestCase):
def test_tokens_context_window(self):
# [START tokens_context_window]
model_info = genai.get_model("models/gemini-1.0-pro-001")
- # Returns the "context window" for the model (the combined input and output token limits)
+
+ # Returns the "context window" for the model,
+ # which is the combined input and output token limits.
print(f"{model_info.input_token_limit=}")
print(f"{model_info.output_token_limit=}")
+ # ( input_token_limit=30720, output_token_limit=2048 )
# [END tokens_context_window]
- # [START tokens_context_window_return]
- # input_token_limit=30720
- # output_token_limit=2048
- # [END tokens_context_window_return]
-
def test_tokens_text_only(self):
# [START tokens_text_only]
model = genai.GenerativeModel("models/gemini-1.5-flash")
@@ -42,22 +40,18 @@ def test_tokens_text_only(self):
# Call `count_tokens` to get the input token count (`total_tokens`).
print("total_tokens: ", model.count_tokens(prompt))
+ # ( total_tokens: 10 )
response = model.generate_content(prompt)
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
print(response.usage_metadata)
+ # ( prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 )
# [END tokens_text_only]
- # [START tokens_text_only_return]
- # total_tokens: total_tokens: 10
- #
- # prompt_token_count: 11
- # candidates_token_count: 73
- # total_token_count: 84
- # [END tokens_text_only_return]
-
def test_tokens_chat(self):
# [START tokens_chat]
model = genai.GenerativeModel("models/gemini-1.5-flash")
@@ -70,30 +64,26 @@ def test_tokens_chat(self):
)
# Call `count_tokens` to get the input token count (`total_tokens`).
print(model.count_tokens(chat.history))
+ # ( total_tokens: 10 )
response = chat.send_message(
"In one sentence, explain how a computer works to a young child."
)
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+
+ # On the response for `send_message`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
print(response.usage_metadata)
+ # ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
- # TODO add comment...
from google.generativeai.types.content_types import to_contents
+ # You can call `count_tokens` on the combined history and content of the next turn.
print(model.count_tokens(chat.history + to_contents("What is the meaning of life?")))
+ # ( total_tokens: 56 )
# [END tokens_chat]
- # [START tokens_chat_return]
- # total_tokens: 10
- #
- # prompt_token_count: 25
- # candidates_token_count: 21
- # total_token_count: 46
- #
- # total_tokens: 56
- # [END tokens_chat_return]
-
def test_tokens_multimodal_image_inline(self):
# [START tokens_multimodal_image_inline]
import PIL.Image
@@ -103,25 +93,23 @@ def test_tokens_multimodal_image_inline(self):
prompt = "Tell me about this image"
your_image_file = PIL.Image.open("image.jpg")
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
- # An image's display size does not affect its token count.
- # Optionally, you can call `count_tokens` for the prompt and file separately.
+ # Call `count_tokens` to get the input token count
+ # of the combined text and file (`total_tokens`).
+ # An image's display or file size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the text and file separately.
print(model.count_tokens([prompt, your_image_file]))
+ # ( total_tokens: 263 )
response = model.generate_content([prompt, your_image_file])
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
print(response.usage_metadata)
+ # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
# [END tokens_multimodal_image_inline]
- # [START tokens_multimodal_image_inline_return]
- # total_tokens: 263
- #
- # prompt_token_count: 264
- # candidates_token_count: 81
- # total_token_count: 345
- # [END tokens_multimodal_image_inline_return]
-
def test_tokens_multimodal_image_file_api(self):
# [START tokens_multimodal_image_file_api]
model = genai.GenerativeModel("models/gemini-1.5-flash")
@@ -129,26 +117,23 @@ def test_tokens_multimodal_image_file_api(self):
prompt = "Tell me about this image"
your_image_file = genai.upload_file(path="image.jpg")
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
- # An image's display size does not affect its token count.
- # Optionally, you can call `count_tokens` for the prompt and file separately.
+ # Call `count_tokens` to get the input token count
+ # of the combined text and file (`total_tokens`).
+ # An image's display or file size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the text and file separately.
print(model.count_tokens([prompt, your_image_file]))
+ # ( total_tokens: 263 )
response = model.generate_content([prompt, your_image_file])
response.text
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
print(response.usage_metadata)
+ # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
# [END tokens_multimodal_image_file_api]
- # [START tokens_multimodal_image_file_api_return]
- # total_tokens: 263
- #
- # prompt_token_count: 264
- # candidates_token_count: 80
- # total_token_count: 344
- # [END tokens_multimodal_image_file_api_return]
-
def test_tokens_multimodal_video_audio_file_api(self):
# [START tokens_multimodal_video_audio_file_api]
import time
@@ -164,28 +149,24 @@ def test_tokens_multimodal_video_audio_file_api(self):
time.sleep(5)
your_file = genai.get_file(your_file.name)
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
+ # Call `count_tokens` to get the input token count
+ # of the combined text and video/audio file (`total_tokens`).
# A video or audio file is converted to tokens at a fixed rate of tokens per second.
- # Optionally, you can call `count_tokens` for the prompt and file separately.
+ # Optionally, you can call `count_tokens` for the text and file separately.
print(model.count_tokens([prompt, your_file]))
+ # ( total_tokens: 300 )
response = model.generate_content([prompt, your_file])
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
print(response.usage_metadata)
+ # ( prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )
# [END tokens_multimodal_video_audio_file_api]
- # [START tokens_multimodal_video_audio_file_api_return]
- # processing video...
- # total_tokens: 300
- #
- # prompt_token_count: 301
- # candidates_token_count: 60
- # total_token_count: 361
- # [END tokens_multimodal_video_audio_file_api_return]
-
def test_tokens_cached_content(self):
# [START tokens_cached_content]
import time
@@ -196,7 +177,7 @@ def test_tokens_cached_content(self):
cache = genai.caching.CachedContent.create(
model="models/gemini-1.5-flash-001",
- # You could set the system_instruction and tools
+ # You can set the system_instruction and tools
system_instruction=None,
tools=None,
contents=["Here the Apollo 11 transcript:", your_file],
@@ -204,63 +185,55 @@ def test_tokens_cached_content(self):
model = genai.GenerativeModel.from_cached_content(cache)
- # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`).
- # A video or audio file is converted to tokens at a fixed rate of tokens per second.
- # Optionally, you can call `count_tokens` for the prompt and file separately.
prompt = "Please give a short summary of this file."
+
+ # Call `count_tokens` to get input token count
+ # of the combined text and file (`total_tokens`).
+ # A video or audio file is converted to tokens at a fixed rate of tokens per second.
+ # Optionally, you can call `count_tokens` for the text and file separately.
print(model.count_tokens(prompt))
+ # ( total_tokens: 9 )
response = model.generate_content(prompt)
- # Use `usage_metadata` to get both input and output token counts
- # (`prompt_token_count` and `candidates_token_count`, respectively).
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the cached content token count and the combined total token count.
print(response.usage_metadata)
+ # ( prompt_token_count: 323393, cached_content_token_count: 323383, candidates_token_count: 64)
+ # ( total_token_count: 323457 )
cache.delete()
# [END tokens_cached_content]
- # [START tokens_cached_content_return]
- # total_tokens: 9
- #
- # prompt_token_count: 323393
- # cached_content_token_count: 323383
- # candidates_token_count: 64
- # total_token_count: 323457
- # [END tokens_cached_content_return]
-
def test_tokens_system_instruction(self):
# [START tokens_system_instruction]
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
- # The total token count includes everything sent to the generate_content request.
- print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
+ prompt="The quick brown fox jumps over the lazy dog."
+
+ print(model.count_tokens(prompt))
# total_tokens: 10
model = genai.GenerativeModel(
model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko."
)
- # The total token count includes everything sent to the generate_content request.
+ # The total token count includes everything sent to the `generate_content` request.
# When you use system instructions, the total token count increases.
- print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 21 )
# [END tokens_system_instruction]
- # [START tokens_system_instruction_return]
- # total_tokens: 10
- #
- # total_tokens: 21
- # [END tokens_system_instruction_return]
-
def test_tokens_tools(self):
# [START tokens_tools]
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
- # The total token count includes everything sent to the generate_content request.
- print(
- model.count_tokens(
- "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
- )
- )
- # total_tokens: 10
+ prompt="I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 22 )
def add(a: float, b: float):
"""returns a + b."""
@@ -282,19 +255,12 @@ def divide(a: float, b: float):
"models/gemini-1.5-flash-001", tools=[add, subtract, multiply, divide]
)
- print(
- model.count_tokens(
- "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
- )
- )
+ # The total token count includes everything sent to the `generate_content` request.
+ # When you use tools (like function calling), the total token count increases.
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 206 )
# [END tokens_tools]
- # [START tokens_tools_return]
- # total_tokens: 22
- #
- # total_tokens: 206
- # [END tokens_tools_return]
-
if __name__ == "__main__":
absltest.main()
From 8494231be1d9220e0ce8cda82d4f9a3209afb375 Mon Sep 17 00:00:00 2001
From: Guillaume Vernade
Date: Wed, 17 Jul 2024 18:24:27 +0200
Subject: [PATCH 13/90] Formatting (using black) (#460)
---
samples/count_tokens.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index a45457e38..beae3b288 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -69,7 +69,7 @@ def test_tokens_chat(self):
response = chat.send_message(
"In one sentence, explain how a computer works to a young child."
)
-
+
# On the response for `send_message`, use `usage_metadata`
# to get separate input and output token counts
# (`prompt_token_count` and `candidates_token_count`, respectively),
@@ -195,7 +195,7 @@ def test_tokens_cached_content(self):
# ( total_tokens: 9 )
response = model.generate_content(prompt)
-
+
# On the response for `generate_content`, use `usage_metadata`
# to get separate input and output token counts
# (`prompt_token_count` and `candidates_token_count`, respectively),
@@ -211,7 +211,7 @@ def test_tokens_system_instruction(self):
# [START tokens_system_instruction]
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
- prompt="The quick brown fox jumps over the lazy dog."
+ prompt = "The quick brown fox jumps over the lazy dog."
print(model.count_tokens(prompt))
# total_tokens: 10
@@ -230,7 +230,7 @@ def test_tokens_tools(self):
# [START tokens_tools]
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
- prompt="I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+ prompt = "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
print(model.count_tokens(prompt))
# ( total_tokens: 22 )
From 3491bfc3b91f1b374193aa3748cb31d5c78db554 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Wed, 17 Jul 2024 09:35:15 -0700
Subject: [PATCH 14/90] Adding count_tokens for rest (#444)
* Adding count_tokens for rest
* Update to use the same prompt as the Python example
* Tests now working
---
samples/rest/count_tokens.sh | 32 ++++++++++++++++++++++++++++++++
1 file changed, 32 insertions(+)
create mode 100644 samples/rest/count_tokens.sh
diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh
new file mode 100644
index 000000000..867e787b8
--- /dev/null
+++ b/samples/rest/count_tokens.sh
@@ -0,0 +1,32 @@
+set -eu
+
+echo "[START tokens_text_only]"
+# [START tokens_text_only]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[{
+ "text": "The quick brown fox jumps over the lazy dog."
+ }],
+ }],
+ }'
+# [END tokens_text_only]
+
+echo "[START tokens_chat]"
+# [START tokens_chat]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role": "user",
+ "parts": [{"text": "Hi, my name is Bob."}],
+ },
+ {"role": "model",
+ "parts":[{"text": "Hi Bob"}],
+ },
+ ],
+ }'
+# [END tokens_chat]
\ No newline at end of file
From e8ad6533b8d3e2cf2308fd092114a91951708240 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Wed, 17 Jul 2024 09:42:13 -0700
Subject: [PATCH 15/90] add safety settings examples for curl (#433)
* Add safety settings examples for curl
* Replace integers with category names
---
samples/rest/safety_settings.sh | 38 +++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
create mode 100644 samples/rest/safety_settings.sh
diff --git a/samples/rest/safety_settings.sh b/samples/rest/safety_settings.sh
new file mode 100644
index 000000000..f7eb45186
--- /dev/null
+++ b/samples/rest/safety_settings.sh
@@ -0,0 +1,38 @@
+set -eu
+
+echo "[START safety_settings]"
+# [START safety_settings]
+echo '{
+ "safetySettings": [
+ {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH}
+ ],
+ "contents": [{
+ "parts":[{
+ "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json
+
+ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d @request.json 2> /dev/null > tee response.json
+
+ jq .promptFeedback > response.json
+# [END safety_settings]
+
+echo "[START safety_settings_multi]"
+# [START safety_settings_multi]
+echo '{
+ "safetySettings": [
+ {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH},
+ {'category': HARM_CATEGORY_HATE_SPEECH, 'threshold': BLOCK_MEDIUM_AND_ABOVE}
+ ],
+ "contents": [{
+ "parts":[{
+ "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json
+
+ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d @request.json 2> /dev/null > response.json
+
+ jq .promptFeedback > response.json
+# [END safety_settings_multi]
From c5ef6c046dece66e723379c4ada41c04622ce374 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Wed, 17 Jul 2024 16:35:30 -0700
Subject: [PATCH 16/90] Add markdown docs (#462)
Change-Id: I63ffaa1c0d4af92f4a630ea21c99f927095c1d34
---
.gitignore | 1 -
docs/api/google/generativeai.md | 138 +
docs/api/google/generativeai/ChatSession.md | 222 +
.../google/generativeai/GenerativeModel.md | 429 +
docs/api/google/generativeai/_api_cache.json | 9886 +++++++++++++++++
docs/api/google/generativeai/_redirects.yaml | 13 +
docs/api/google/generativeai/_toc.yaml | 507 +
docs/api/google/generativeai/all_symbols.md | 261 +
docs/api/google/generativeai/api_report.pb | Bin 0 -> 49595 bytes
docs/api/google/generativeai/chat.md | 198 +
docs/api/google/generativeai/chat_async.md | 198 +
docs/api/google/generativeai/configure.md | 80 +
.../generativeai/count_message_tokens.md | 41 +
.../google/generativeai/count_text_tokens.md | 37 +
.../google/generativeai/create_tuned_model.md | 198 +
docs/api/google/generativeai/delete_file.md | 34 +
.../google/generativeai/delete_tuned_model.md | 36 +
docs/api/google/generativeai/embed_content.md | 112 +
.../generativeai/embed_content_async.md | 40 +
.../generativeai/generate_embeddings.md | 90 +
docs/api/google/generativeai/generate_text.md | 172 +
.../api/google/generativeai/get_base_model.md | 87 +
docs/api/google/generativeai/get_file.md | 34 +
docs/api/google/generativeai/get_model.md | 87 +
docs/api/google/generativeai/get_operation.md | 34 +
.../google/generativeai/get_tuned_model.md | 87 +
docs/api/google/generativeai/list_files.md | 34 +
docs/api/google/generativeai/list_models.md | 87 +
.../google/generativeai/list_operations.md | 34 +
.../google/generativeai/list_tuned_models.md | 87 +
docs/api/google/generativeai/protos.md | 368 +
.../protos/AttributionSourceId.md | 73 +
.../AttributionSourceId/GroundingPassageId.md | 59 +
.../SemanticRetrieverChunk.md | 60 +
.../protos/BatchCreateChunksRequest.md | 62 +
.../protos/BatchCreateChunksResponse.md | 48 +
.../protos/BatchDeleteChunksRequest.md | 61 +
.../protos/BatchEmbedContentsRequest.md | 65 +
.../protos/BatchEmbedContentsResponse.md | 50 +
.../protos/BatchEmbedTextRequest.md | 71 +
.../protos/BatchEmbedTextResponse.md | 49 +
.../protos/BatchUpdateChunksRequest.md | 62 +
.../protos/BatchUpdateChunksResponse.md | 48 +
docs/api/google/generativeai/protos/Blob.md | 64 +
.../generativeai/protos/CachedContent.md | 181 +
.../protos/CachedContent/UsageMetadata.md | 49 +
.../google/generativeai/protos/Candidate.md | 123 +
.../protos/Candidate/FinishReason.md | 733 ++
docs/api/google/generativeai/protos/Chunk.md | 108 +
.../google/generativeai/protos/Chunk/State.md | 696 ++
.../google/generativeai/protos/ChunkData.md | 51 +
.../generativeai/protos/CitationMetadata.md | 48 +
.../generativeai/protos/CitationSource.md | 88 +
.../generativeai/protos/CodeExecution.md | 29 +
.../protos/CodeExecutionResult.md | 65 +
.../protos/CodeExecutionResult/Outcome.md | 699 ++
.../google/generativeai/protos/Condition.md | 80 +
.../generativeai/protos/Condition/Operator.md | 782 ++
.../api/google/generativeai/protos/Content.md | 64 +
.../generativeai/protos/ContentEmbedding.md | 48 +
.../generativeai/protos/ContentFilter.md | 67 +
docs/api/google/generativeai/protos/Corpus.md | 87 +
.../protos/CountMessageTokensRequest.md | 66 +
.../protos/CountMessageTokensResponse.md | 52 +
.../protos/CountTextTokensRequest.md | 66 +
.../protos/CountTextTokensResponse.md | 52 +
.../generativeai/protos/CountTokensRequest.md | 77 +
.../protos/CountTokensResponse.md | 64 +
.../protos/CreateCachedContentRequest.md | 48 +
.../generativeai/protos/CreateChunkRequest.md | 59 +
.../protos/CreateCorpusRequest.md | 48 +
.../protos/CreateDocumentRequest.md | 58 +
.../generativeai/protos/CreateFileRequest.md | 48 +
.../generativeai/protos/CreateFileResponse.md | 48 +
.../protos/CreatePermissionRequest.md | 58 +
.../protos/CreateTunedModelMetadata.md | 86 +
.../protos/CreateTunedModelRequest.md | 62 +
.../generativeai/protos/CustomMetadata.md | 87 +
.../api/google/generativeai/protos/Dataset.md | 50 +
.../protos/DeleteCachedContentRequest.md | 49 +
.../generativeai/protos/DeleteChunkRequest.md | 50 +
.../protos/DeleteCorpusRequest.md | 62 +
.../protos/DeleteDocumentRequest.md | 62 +
.../generativeai/protos/DeleteFileRequest.md | 49 +
.../protos/DeletePermissionRequest.md | 50 +
.../protos/DeleteTunedModelRequest.md | 49 +
.../google/generativeai/protos/Document.md | 99 +
.../protos/EmbedContentRequest.md | 103 +
.../protos/EmbedContentResponse.md | 49 +
.../generativeai/protos/EmbedTextRequest.md | 59 +
.../generativeai/protos/EmbedTextResponse.md | 50 +
.../google/generativeai/protos/Embedding.md | 48 +
.../api/google/generativeai/protos/Example.md | 60 +
.../generativeai/protos/ExecutableCode.md | 64 +
.../protos/ExecutableCode/Language.md | 663 ++
docs/api/google/generativeai/protos/File.md | 164 +
.../google/generativeai/protos/File/State.md | 698 ++
.../google/generativeai/protos/FileData.md | 58 +
.../generativeai/protos/FunctionCall.md | 62 +
.../protos/FunctionCallingConfig.md | 69 +
.../protos/FunctionCallingConfig/Mode.md | 704 ++
.../protos/FunctionDeclaration.md | 80 +
.../generativeai/protos/FunctionResponse.md | 61 +
.../protos/GenerateAnswerRequest.md | 151 +
.../GenerateAnswerRequest/AnswerStyle.md | 698 ++
.../protos/GenerateAnswerResponse.md | 104 +
.../GenerateAnswerResponse/InputFeedback.md | 65 +
.../InputFeedback/BlockReason.md | 680 ++
.../protos/GenerateContentRequest.md | 151 +
.../protos/GenerateContentResponse.md | 86 +
.../GenerateContentResponse/PromptFeedback.md | 64 +
.../PromptFeedback/BlockReason.md | 680 ++
.../GenerateContentResponse/UsageMetadata.md | 80 +
.../protos/GenerateMessageRequest.md | 124 +
.../protos/GenerateMessageResponse.md | 75 +
.../protos/GenerateTextRequest.md | 189 +
.../protos/GenerateTextResponse.md | 78 +
.../generativeai/protos/GenerationConfig.md | 172 +
.../protos/GetCachedContentRequest.md | 49 +
.../generativeai/protos/GetChunkRequest.md | 49 +
.../generativeai/protos/GetCorpusRequest.md | 49 +
.../generativeai/protos/GetDocumentRequest.md | 49 +
.../generativeai/protos/GetFileRequest.md | 49 +
.../generativeai/protos/GetModelRequest.md | 53 +
.../protos/GetPermissionRequest.md | 52 +
.../protos/GetTunedModelRequest.md | 50 +
.../protos/GroundingAttribution.md | 59 +
.../generativeai/protos/GroundingPassage.md | 58 +
.../generativeai/protos/GroundingPassages.md | 48 +
.../generativeai/protos/HarmCategory.md | 822 ++
.../generativeai/protos/Hyperparameters.md | 100 +
.../protos/ListCachedContentsRequest.md | 68 +
.../protos/ListCachedContentsResponse.md | 59 +
.../generativeai/protos/ListChunksRequest.md | 80 +
.../generativeai/protos/ListChunksResponse.md | 60 +
.../generativeai/protos/ListCorporaRequest.md | 69 +
.../protos/ListCorporaResponse.md | 60 +
.../protos/ListDocumentsRequest.md | 79 +
.../protos/ListDocumentsResponse.md | 60 +
.../generativeai/protos/ListFilesRequest.md | 59 +
.../generativeai/protos/ListFilesResponse.md | 58 +
.../generativeai/protos/ListModelsRequest.md | 69 +
.../generativeai/protos/ListModelsResponse.md | 60 +
.../protos/ListPermissionsRequest.md | 80 +
.../protos/ListPermissionsResponse.md | 60 +
.../protos/ListTunedModelsRequest.md | 97 +
.../protos/ListTunedModelsResponse.md | 60 +
.../api/google/generativeai/protos/Message.md | 86 +
.../generativeai/protos/MessagePrompt.md | 103 +
.../generativeai/protos/MetadataFilter.md | 63 +
docs/api/google/generativeai/protos/Model.md | 193 +
docs/api/google/generativeai/protos/Part.md | 136 +
.../google/generativeai/protos/Permission.md | 110 +
.../protos/Permission/GranteeType.md | 698 ++
.../generativeai/protos/Permission/Role.md | 697 ++
.../generativeai/protos/QueryCorpusRequest.md | 109 +
.../protos/QueryCorpusResponse.md | 48 +
.../protos/QueryDocumentRequest.md | 109 +
.../protos/QueryDocumentResponse.md | 48 +
.../generativeai/protos/RelevantChunk.md | 57 +
.../generativeai/protos/SafetyFeedback.md | 63 +
.../generativeai/protos/SafetyRating.md | 77 +
.../generativeai/protos/SafetySetting.md | 64 +
docs/api/google/generativeai/protos/Schema.md | 132 +
.../protos/Schema/PropertiesEntry.md | 89 +
.../protos/SemanticRetrieverConfig.md | 92 +
.../google/generativeai/protos/StringList.md | 48 +
.../google/generativeai/protos/TaskType.md | 771 ++
.../generativeai/protos/TextCompletion.md | 74 +
.../google/generativeai/protos/TextPrompt.md | 50 +
docs/api/google/generativeai/protos/Tool.md | 73 +
.../google/generativeai/protos/ToolConfig.md | 48 +
.../protos/TransferOwnershipRequest.md | 61 +
.../protos/TransferOwnershipResponse.md | 27 +
.../google/generativeai/protos/TunedModel.md | 197 +
.../generativeai/protos/TunedModelSource.md | 61 +
.../generativeai/protos/TuningExample.md | 59 +
.../generativeai/protos/TuningExamples.md | 50 +
.../generativeai/protos/TuningSnapshot.md | 77 +
.../google/generativeai/protos/TuningTask.md | 89 +
docs/api/google/generativeai/protos/Type.md | 746 ++
.../protos/UpdateCachedContentRequest.md | 57 +
.../generativeai/protos/UpdateChunkRequest.md | 58 +
.../protos/UpdateCorpusRequest.md | 58 +
.../protos/UpdateDocumentRequest.md | 58 +
.../protos/UpdatePermissionRequest.md | 62 +
.../protos/UpdateTunedModelRequest.md | 57 +
.../generativeai/protos/VideoMetadata.md | 48 +
docs/api/google/generativeai/types.md | 182 +
.../generativeai/types/AnyModelNameOptions.md | 27 +
.../types/AsyncGenerateContentResponse.md | 152 +
.../google/generativeai/types/AuthorError.md | 27 +
.../types/BaseModelNameOptions.md | 25 +
.../api/google/generativeai/types/BlobDict.md | 27 +
.../api/google/generativeai/types/BlobType.md | 26 +
.../types/BlockedPromptException.md | 27 +
.../generativeai/types/BlockedReason.md | 687 ++
.../generativeai/types/BrokenResponseError.md | 27 +
.../types/CallableFunctionDeclaration.md | 144 +
.../google/generativeai/types/ChatResponse.md | 223 +
.../types/CitationMetadataDict.md | 48 +
.../generativeai/types/CitationSourceDict.md | 84 +
.../google/generativeai/types/Completion.md | 97 +
.../google/generativeai/types/ContentDict.md | 27 +
.../generativeai/types/ContentFilterDict.md | 62 +
.../google/generativeai/types/ContentType.md | 38 +
.../google/generativeai/types/ContentsType.md | 40 +
.../google/generativeai/types/ExampleDict.md | 27 +
.../generativeai/types/ExampleOptions.md | 26 +
.../generativeai/types/ExamplesOptions.md | 27 +
docs/api/google/generativeai/types/File.md | 170 +
.../google/generativeai/types/FileDataDict.md | 27 +
.../google/generativeai/types/FileDataType.md | 26 +
.../generativeai/types/FunctionDeclaration.md | 121 +
.../types/FunctionDeclarationType.md | 26 +
.../generativeai/types/FunctionLibrary.md | 80 +
.../generativeai/types/FunctionLibraryType.md | 32 +
.../types/GenerateContentResponse.md | 185 +
.../generativeai/types/GenerationConfig.md | 255 +
.../types/GenerationConfigDict.md | 27 +
.../types/GenerationConfigType.md | 25 +
.../generativeai/types/HarmBlockThreshold.md | 722 ++
.../google/generativeai/types/HarmCategory.md | 657 ++
.../generativeai/types/HarmProbability.md | 724 ++
.../types/IncompleteIterationError.md | 27 +
.../google/generativeai/types/MessageDict.md | 27 +
.../generativeai/types/MessageOptions.md | 25 +
.../generativeai/types/MessagePromptDict.md | 27 +
.../types/MessagePromptOptions.md | 27 +
.../generativeai/types/MessagesOptions.md | 26 +
docs/api/google/generativeai/types/Model.md | 205 +
.../generativeai/types/ModelsIterable.md | 23 +
.../api/google/generativeai/types/PartDict.md | 27 +
.../api/google/generativeai/types/PartType.md | 35 +
.../google/generativeai/types/Permission.md | 274 +
.../google/generativeai/types/Permissions.md | 386 +
.../generativeai/types/RequestOptions.md | 209 +
.../generativeai/types/RequestOptionsType.md | 24 +
.../google/generativeai/types/ResponseDict.md | 27 +
.../generativeai/types/SafetyFeedbackDict.md | 63 +
.../generativeai/types/SafetyRatingDict.md | 73 +
.../generativeai/types/SafetySettingDict.md | 60 +
docs/api/google/generativeai/types/Status.md | 55 +
.../types/StopCandidateException.md | 27 +
.../generativeai/types/StrictContentType.md | 24 +
docs/api/google/generativeai/types/Tool.md | 107 +
.../api/google/generativeai/types/ToolDict.md | 27 +
.../google/generativeai/types/ToolsType.md | 31 +
.../google/generativeai/types/TunedModel.md | 272 +
.../types/TunedModelNameOptions.md | 25 +
.../generativeai/types/TunedModelState.md | 703 ++
.../google/generativeai/types/TypedDict.md | 73 +
.../types/get_default_file_client.md | 30 +
.../google/generativeai/types/to_file_data.md | 32 +
.../google/generativeai/update_tuned_model.md | 38 +
docs/api/google/generativeai/upload_file.md | 105 +
256 files changed, 42898 insertions(+), 1 deletion(-)
create mode 100644 docs/api/google/generativeai.md
create mode 100644 docs/api/google/generativeai/ChatSession.md
create mode 100644 docs/api/google/generativeai/GenerativeModel.md
create mode 100644 docs/api/google/generativeai/_api_cache.json
create mode 100644 docs/api/google/generativeai/_redirects.yaml
create mode 100644 docs/api/google/generativeai/_toc.yaml
create mode 100644 docs/api/google/generativeai/all_symbols.md
create mode 100644 docs/api/google/generativeai/api_report.pb
create mode 100644 docs/api/google/generativeai/chat.md
create mode 100644 docs/api/google/generativeai/chat_async.md
create mode 100644 docs/api/google/generativeai/configure.md
create mode 100644 docs/api/google/generativeai/count_message_tokens.md
create mode 100644 docs/api/google/generativeai/count_text_tokens.md
create mode 100644 docs/api/google/generativeai/create_tuned_model.md
create mode 100644 docs/api/google/generativeai/delete_file.md
create mode 100644 docs/api/google/generativeai/delete_tuned_model.md
create mode 100644 docs/api/google/generativeai/embed_content.md
create mode 100644 docs/api/google/generativeai/embed_content_async.md
create mode 100644 docs/api/google/generativeai/generate_embeddings.md
create mode 100644 docs/api/google/generativeai/generate_text.md
create mode 100644 docs/api/google/generativeai/get_base_model.md
create mode 100644 docs/api/google/generativeai/get_file.md
create mode 100644 docs/api/google/generativeai/get_model.md
create mode 100644 docs/api/google/generativeai/get_operation.md
create mode 100644 docs/api/google/generativeai/get_tuned_model.md
create mode 100644 docs/api/google/generativeai/list_files.md
create mode 100644 docs/api/google/generativeai/list_models.md
create mode 100644 docs/api/google/generativeai/list_operations.md
create mode 100644 docs/api/google/generativeai/list_tuned_models.md
create mode 100644 docs/api/google/generativeai/protos.md
create mode 100644 docs/api/google/generativeai/protos/AttributionSourceId.md
create mode 100644 docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
create mode 100644 docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
create mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
create mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
create mode 100644 docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
create mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
create mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
create mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
create mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
create mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
create mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
create mode 100644 docs/api/google/generativeai/protos/Blob.md
create mode 100644 docs/api/google/generativeai/protos/CachedContent.md
create mode 100644 docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
create mode 100644 docs/api/google/generativeai/protos/Candidate.md
create mode 100644 docs/api/google/generativeai/protos/Candidate/FinishReason.md
create mode 100644 docs/api/google/generativeai/protos/Chunk.md
create mode 100644 docs/api/google/generativeai/protos/Chunk/State.md
create mode 100644 docs/api/google/generativeai/protos/ChunkData.md
create mode 100644 docs/api/google/generativeai/protos/CitationMetadata.md
create mode 100644 docs/api/google/generativeai/protos/CitationSource.md
create mode 100644 docs/api/google/generativeai/protos/CodeExecution.md
create mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult.md
create mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
create mode 100644 docs/api/google/generativeai/protos/Condition.md
create mode 100644 docs/api/google/generativeai/protos/Condition/Operator.md
create mode 100644 docs/api/google/generativeai/protos/Content.md
create mode 100644 docs/api/google/generativeai/protos/ContentEmbedding.md
create mode 100644 docs/api/google/generativeai/protos/ContentFilter.md
create mode 100644 docs/api/google/generativeai/protos/Corpus.md
create mode 100644 docs/api/google/generativeai/protos/CountMessageTokensRequest.md
create mode 100644 docs/api/google/generativeai/protos/CountMessageTokensResponse.md
create mode 100644 docs/api/google/generativeai/protos/CountTextTokensRequest.md
create mode 100644 docs/api/google/generativeai/protos/CountTextTokensResponse.md
create mode 100644 docs/api/google/generativeai/protos/CountTokensRequest.md
create mode 100644 docs/api/google/generativeai/protos/CountTokensResponse.md
create mode 100644 docs/api/google/generativeai/protos/CreateCachedContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateChunkRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateCorpusRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateDocumentRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateFileRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateFileResponse.md
create mode 100644 docs/api/google/generativeai/protos/CreatePermissionRequest.md
create mode 100644 docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
create mode 100644 docs/api/google/generativeai/protos/CreateTunedModelRequest.md
create mode 100644 docs/api/google/generativeai/protos/CustomMetadata.md
create mode 100644 docs/api/google/generativeai/protos/Dataset.md
create mode 100644 docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeleteChunkRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeleteCorpusRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeleteDocumentRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeleteFileRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeletePermissionRequest.md
create mode 100644 docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
create mode 100644 docs/api/google/generativeai/protos/Document.md
create mode 100644 docs/api/google/generativeai/protos/EmbedContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/EmbedContentResponse.md
create mode 100644 docs/api/google/generativeai/protos/EmbedTextRequest.md
create mode 100644 docs/api/google/generativeai/protos/EmbedTextResponse.md
create mode 100644 docs/api/google/generativeai/protos/Embedding.md
create mode 100644 docs/api/google/generativeai/protos/Example.md
create mode 100644 docs/api/google/generativeai/protos/ExecutableCode.md
create mode 100644 docs/api/google/generativeai/protos/ExecutableCode/Language.md
create mode 100644 docs/api/google/generativeai/protos/File.md
create mode 100644 docs/api/google/generativeai/protos/File/State.md
create mode 100644 docs/api/google/generativeai/protos/FileData.md
create mode 100644 docs/api/google/generativeai/protos/FunctionCall.md
create mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig.md
create mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
create mode 100644 docs/api/google/generativeai/protos/FunctionDeclaration.md
create mode 100644 docs/api/google/generativeai/protos/FunctionResponse.md
create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest.md
create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse.md
create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
create mode 100644 docs/api/google/generativeai/protos/GenerateContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse.md
create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
create mode 100644 docs/api/google/generativeai/protos/GenerateMessageRequest.md
create mode 100644 docs/api/google/generativeai/protos/GenerateMessageResponse.md
create mode 100644 docs/api/google/generativeai/protos/GenerateTextRequest.md
create mode 100644 docs/api/google/generativeai/protos/GenerateTextResponse.md
create mode 100644 docs/api/google/generativeai/protos/GenerationConfig.md
create mode 100644 docs/api/google/generativeai/protos/GetCachedContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetChunkRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetCorpusRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetDocumentRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetFileRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetModelRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetPermissionRequest.md
create mode 100644 docs/api/google/generativeai/protos/GetTunedModelRequest.md
create mode 100644 docs/api/google/generativeai/protos/GroundingAttribution.md
create mode 100644 docs/api/google/generativeai/protos/GroundingPassage.md
create mode 100644 docs/api/google/generativeai/protos/GroundingPassages.md
create mode 100644 docs/api/google/generativeai/protos/HarmCategory.md
create mode 100644 docs/api/google/generativeai/protos/Hyperparameters.md
create mode 100644 docs/api/google/generativeai/protos/ListCachedContentsRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListCachedContentsResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListChunksRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListChunksResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListCorporaRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListCorporaResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListDocumentsRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListDocumentsResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListFilesRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListFilesResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListModelsRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListModelsResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListPermissionsRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListPermissionsResponse.md
create mode 100644 docs/api/google/generativeai/protos/ListTunedModelsRequest.md
create mode 100644 docs/api/google/generativeai/protos/ListTunedModelsResponse.md
create mode 100644 docs/api/google/generativeai/protos/Message.md
create mode 100644 docs/api/google/generativeai/protos/MessagePrompt.md
create mode 100644 docs/api/google/generativeai/protos/MetadataFilter.md
create mode 100644 docs/api/google/generativeai/protos/Model.md
create mode 100644 docs/api/google/generativeai/protos/Part.md
create mode 100644 docs/api/google/generativeai/protos/Permission.md
create mode 100644 docs/api/google/generativeai/protos/Permission/GranteeType.md
create mode 100644 docs/api/google/generativeai/protos/Permission/Role.md
create mode 100644 docs/api/google/generativeai/protos/QueryCorpusRequest.md
create mode 100644 docs/api/google/generativeai/protos/QueryCorpusResponse.md
create mode 100644 docs/api/google/generativeai/protos/QueryDocumentRequest.md
create mode 100644 docs/api/google/generativeai/protos/QueryDocumentResponse.md
create mode 100644 docs/api/google/generativeai/protos/RelevantChunk.md
create mode 100644 docs/api/google/generativeai/protos/SafetyFeedback.md
create mode 100644 docs/api/google/generativeai/protos/SafetyRating.md
create mode 100644 docs/api/google/generativeai/protos/SafetySetting.md
create mode 100644 docs/api/google/generativeai/protos/Schema.md
create mode 100644 docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
create mode 100644 docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
create mode 100644 docs/api/google/generativeai/protos/StringList.md
create mode 100644 docs/api/google/generativeai/protos/TaskType.md
create mode 100644 docs/api/google/generativeai/protos/TextCompletion.md
create mode 100644 docs/api/google/generativeai/protos/TextPrompt.md
create mode 100644 docs/api/google/generativeai/protos/Tool.md
create mode 100644 docs/api/google/generativeai/protos/ToolConfig.md
create mode 100644 docs/api/google/generativeai/protos/TransferOwnershipRequest.md
create mode 100644 docs/api/google/generativeai/protos/TransferOwnershipResponse.md
create mode 100644 docs/api/google/generativeai/protos/TunedModel.md
create mode 100644 docs/api/google/generativeai/protos/TunedModelSource.md
create mode 100644 docs/api/google/generativeai/protos/TuningExample.md
create mode 100644 docs/api/google/generativeai/protos/TuningExamples.md
create mode 100644 docs/api/google/generativeai/protos/TuningSnapshot.md
create mode 100644 docs/api/google/generativeai/protos/TuningTask.md
create mode 100644 docs/api/google/generativeai/protos/Type.md
create mode 100644 docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
create mode 100644 docs/api/google/generativeai/protos/UpdateChunkRequest.md
create mode 100644 docs/api/google/generativeai/protos/UpdateCorpusRequest.md
create mode 100644 docs/api/google/generativeai/protos/UpdateDocumentRequest.md
create mode 100644 docs/api/google/generativeai/protos/UpdatePermissionRequest.md
create mode 100644 docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
create mode 100644 docs/api/google/generativeai/protos/VideoMetadata.md
create mode 100644 docs/api/google/generativeai/types.md
create mode 100644 docs/api/google/generativeai/types/AnyModelNameOptions.md
create mode 100644 docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
create mode 100644 docs/api/google/generativeai/types/AuthorError.md
create mode 100644 docs/api/google/generativeai/types/BaseModelNameOptions.md
create mode 100644 docs/api/google/generativeai/types/BlobDict.md
create mode 100644 docs/api/google/generativeai/types/BlobType.md
create mode 100644 docs/api/google/generativeai/types/BlockedPromptException.md
create mode 100644 docs/api/google/generativeai/types/BlockedReason.md
create mode 100644 docs/api/google/generativeai/types/BrokenResponseError.md
create mode 100644 docs/api/google/generativeai/types/CallableFunctionDeclaration.md
create mode 100644 docs/api/google/generativeai/types/ChatResponse.md
create mode 100644 docs/api/google/generativeai/types/CitationMetadataDict.md
create mode 100644 docs/api/google/generativeai/types/CitationSourceDict.md
create mode 100644 docs/api/google/generativeai/types/Completion.md
create mode 100644 docs/api/google/generativeai/types/ContentDict.md
create mode 100644 docs/api/google/generativeai/types/ContentFilterDict.md
create mode 100644 docs/api/google/generativeai/types/ContentType.md
create mode 100644 docs/api/google/generativeai/types/ContentsType.md
create mode 100644 docs/api/google/generativeai/types/ExampleDict.md
create mode 100644 docs/api/google/generativeai/types/ExampleOptions.md
create mode 100644 docs/api/google/generativeai/types/ExamplesOptions.md
create mode 100644 docs/api/google/generativeai/types/File.md
create mode 100644 docs/api/google/generativeai/types/FileDataDict.md
create mode 100644 docs/api/google/generativeai/types/FileDataType.md
create mode 100644 docs/api/google/generativeai/types/FunctionDeclaration.md
create mode 100644 docs/api/google/generativeai/types/FunctionDeclarationType.md
create mode 100644 docs/api/google/generativeai/types/FunctionLibrary.md
create mode 100644 docs/api/google/generativeai/types/FunctionLibraryType.md
create mode 100644 docs/api/google/generativeai/types/GenerateContentResponse.md
create mode 100644 docs/api/google/generativeai/types/GenerationConfig.md
create mode 100644 docs/api/google/generativeai/types/GenerationConfigDict.md
create mode 100644 docs/api/google/generativeai/types/GenerationConfigType.md
create mode 100644 docs/api/google/generativeai/types/HarmBlockThreshold.md
create mode 100644 docs/api/google/generativeai/types/HarmCategory.md
create mode 100644 docs/api/google/generativeai/types/HarmProbability.md
create mode 100644 docs/api/google/generativeai/types/IncompleteIterationError.md
create mode 100644 docs/api/google/generativeai/types/MessageDict.md
create mode 100644 docs/api/google/generativeai/types/MessageOptions.md
create mode 100644 docs/api/google/generativeai/types/MessagePromptDict.md
create mode 100644 docs/api/google/generativeai/types/MessagePromptOptions.md
create mode 100644 docs/api/google/generativeai/types/MessagesOptions.md
create mode 100644 docs/api/google/generativeai/types/Model.md
create mode 100644 docs/api/google/generativeai/types/ModelsIterable.md
create mode 100644 docs/api/google/generativeai/types/PartDict.md
create mode 100644 docs/api/google/generativeai/types/PartType.md
create mode 100644 docs/api/google/generativeai/types/Permission.md
create mode 100644 docs/api/google/generativeai/types/Permissions.md
create mode 100644 docs/api/google/generativeai/types/RequestOptions.md
create mode 100644 docs/api/google/generativeai/types/RequestOptionsType.md
create mode 100644 docs/api/google/generativeai/types/ResponseDict.md
create mode 100644 docs/api/google/generativeai/types/SafetyFeedbackDict.md
create mode 100644 docs/api/google/generativeai/types/SafetyRatingDict.md
create mode 100644 docs/api/google/generativeai/types/SafetySettingDict.md
create mode 100644 docs/api/google/generativeai/types/Status.md
create mode 100644 docs/api/google/generativeai/types/StopCandidateException.md
create mode 100644 docs/api/google/generativeai/types/StrictContentType.md
create mode 100644 docs/api/google/generativeai/types/Tool.md
create mode 100644 docs/api/google/generativeai/types/ToolDict.md
create mode 100644 docs/api/google/generativeai/types/ToolsType.md
create mode 100644 docs/api/google/generativeai/types/TunedModel.md
create mode 100644 docs/api/google/generativeai/types/TunedModelNameOptions.md
create mode 100644 docs/api/google/generativeai/types/TunedModelState.md
create mode 100644 docs/api/google/generativeai/types/TypedDict.md
create mode 100644 docs/api/google/generativeai/types/get_default_file_client.md
create mode 100644 docs/api/google/generativeai/types/to_file_data.md
create mode 100644 docs/api/google/generativeai/update_tuned_model.md
create mode 100644 docs/api/google/generativeai/upload_file.md
diff --git a/.gitignore b/.gitignore
index 10692be5c..72ac0ed80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@
/.idea/
/.pytype/
/build/
-/docs/api
*.egg-info
.DS_Store
__pycache__
diff --git a/docs/api/google/generativeai.md b/docs/api/google/generativeai.md
new file mode 100644
index 000000000..23ee47866
--- /dev/null
+++ b/docs/api/google/generativeai.md
@@ -0,0 +1,138 @@
+description: Google AI Python SDK
+
+
+
+
+
+
+
+
+# Module: google.generativeai
+
+
+
+
+
+
+
+Google AI Python SDK
+
+
+
+## Setup
+
+```posix-terminal
+pip install google-generativeai
+```
+
+## GenerativeModel
+
+Use `genai.GenerativeModel` to access the API:
+
+```
+import google.generativeai as genai
+import os
+
+genai.configure(api_key=os.environ['API_KEY'])
+
+model = genai.GenerativeModel(model_name='gemini-1.5-flash')
+response = model.generate_content('Teach me about how an LLM works')
+
+print(response.text)
+```
+
+See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
+
+## Modules
+
+[`protos`](../google/generativeai/protos.md) module: This module provides low level access to the ProtoBuffer "Message" classes used by the API.
+
+[`types`](../google/generativeai/types.md) module: A collection of type definitions used throughout the library.
+
+## Classes
+
+[`class ChatSession`](../google/generativeai/ChatSession.md): Contains an ongoing conversation with the model.
+
+[`class GenerationConfig`](../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content
.
+
+[`class GenerativeModel`](../google/generativeai/GenerativeModel.md): The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content
, GenerativeModel.count_tokens
, and GenerativeModel.start_chat
.
+
+## Functions
+
+[`chat(...)`](../google/generativeai/chat.md): Calls the API to initiate a chat with a model using the provided parameters.
+
+[`chat_async(...)`](../google/generativeai/chat_async.md): Calls the API to initiate a chat with a model using the provided parameters.
+
+[`configure(...)`](../google/generativeai/configure.md): Captures default client configuration.
+
+[`count_message_tokens(...)`](../google/generativeai/count_message_tokens.md): Calls the API to calculate the number of tokens used in the prompt.
+
+[`count_text_tokens(...)`](../google/generativeai/count_text_tokens.md): Calls the API to count the number of tokens in the text prompt.
+
+[`create_tuned_model(...)`](../google/generativeai/create_tuned_model.md): Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
+
+[`delete_file(...)`](../google/generativeai/delete_file.md): Calls the API to permanently delete a specified file using a supported file service.
+
+[`delete_tuned_model(...)`](../google/generativeai/delete_tuned_model.md): Calls the API to delete a specified tuned model
+
+[`embed_content(...)`](../google/generativeai/embed_content.md): Calls the API to create embeddings for content passed in.
+
+[`embed_content_async(...)`](../google/generativeai/embed_content_async.md): Calls the API to create async embeddings for content passed in.
+
+[`generate_embeddings(...)`](../google/generativeai/generate_embeddings.md): Calls the API to create an embedding for the text passed in.
+
+[`generate_text(...)`](../google/generativeai/generate_text.md): Calls the API to generate text based on the provided prompt.
+
+[`get_base_model(...)`](../google/generativeai/get_base_model.md): Calls the API to fetch a base model by name.
+
+[`get_file(...)`](../google/generativeai/get_file.md): Calls the API to retrieve a specified file using a supported file service.
+
+[`get_model(...)`](../google/generativeai/get_model.md): Calls the API to fetch a model by name.
+
+[`get_operation(...)`](../google/generativeai/get_operation.md): Calls the API to get a specific operation
+
+[`get_tuned_model(...)`](../google/generativeai/get_tuned_model.md): Calls the API to fetch a tuned model by name.
+
+[`list_files(...)`](../google/generativeai/list_files.md): Calls the API to list files using a supported file service.
+
+[`list_models(...)`](../google/generativeai/list_models.md): Calls the API to list all available models.
+
+[`list_operations(...)`](../google/generativeai/list_operations.md): Calls the API to list all operations
+
+[`list_tuned_models(...)`](../google/generativeai/list_tuned_models.md): Calls the API to list all tuned models.
+
+[`update_tuned_model(...)`](../google/generativeai/update_tuned_model.md): Calls the API to push updates to a specified tuned model where only certain attributes are updatable.
+
+[`upload_file(...)`](../google/generativeai/upload_file.md): Calls the API to upload a file using a supported file service.
+
+
+
+
+
+
+Other Members |
+
+
+
+__version__
+ |
+
+`'0.7.2'`
+ |
+
+
+annotations
+ |
+
+Instance of `__future__._Feature`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md
new file mode 100644
index 000000000..3898a2ef1
--- /dev/null
+++ b/docs/api/google/generativeai/ChatSession.md
@@ -0,0 +1,222 @@
+description: Contains an ongoing conversation with the model.
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.ChatSession
+
+
+
+
+
+
+
+Contains an ongoing conversation with the model.
+
+
+google.generativeai.ChatSession(
+ model: GenerativeModel,
+ history: (Iterable[content_types.StrictContentType] | None) = None,
+ enable_automatic_function_calling: bool = False
+)
+
+
+
+
+
+
+```
+>>> model = genai.GenerativeModel('models/gemini-pro')
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hello")
+>>> print(response.text)
+>>> response = chat.send_message("Hello again")
+>>> print(response.text)
+>>> response = chat.send_message(...
+```
+
+This `ChatSession` object collects the messages sent and received, in its
+ChatSession.history
attribute.
+
+
+
+
+Arguments |
+
+
+
+`model`
+ |
+
+The model to use in the chat.
+ |
+
+
+`history`
+ |
+
+A chat history to initialize the object with.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`history`
+ |
+
+The chat history.
+ |
+
+
+`last`
+ |
+
+returns the last received `genai.GenerateContentResponse`
+ |
+
+
+
+
+
+## Methods
+
+rewind
+
+View source
+
+
+rewind() -> tuple[protos.Content, protos.Content]
+
+
+Removes the last request/response pair from the chat history.
+
+
+send_message
+
+View source
+
+
+send_message(
+ content: content_types.ContentType,
+ *,
+ generation_config: generation_types.GenerationConfigType = None,
+ safety_settings: safety_types.SafetySettingOptions = None,
+ stream: bool = False,
+ tools: (content_types.FunctionLibraryType | None) = None,
+ tool_config: (content_types.ToolConfigType | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> generation_types.GenerateContentResponse
+
+
+Sends the conversation history with the added message and returns the model's response.
+
+Appends the request and response to the conversation history.
+
+```
+>>> model = genai.GenerativeModel('models/gemini-pro')
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hello")
+>>> print(response.text)
+"Hello! How can I assist you today?"
+>>> len(chat.history)
+2
+```
+
+Call it with `stream=True` to receive response chunks as they are generated:
+
+```
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Explain quantum physics", stream=True)
+>>> for chunk in response:
+... print(chunk.text, end='')
+```
+
+Once iteration over chunks is complete, the `response` and `ChatSession` are in states identical to the
+`stream=False` case. Some properties are not available until iteration is complete.
+
+Like GenerativeModel.generate_content
this method lets you override the model's `generation_config` and
+`safety_settings`.
+
+
+
+
+Arguments |
+
+
+
+`content`
+ |
+
+The message contents.
+ |
+
+
+`generation_config`
+ |
+
+Overrides for the model's generation config.
+ |
+
+
+`safety_settings`
+ |
+
+Overrides for the model's safety settings.
+ |
+
+
+`stream`
+ |
+
+If True, yield response chunks as they are generated.
+ |
+
+
+
+
+
+send_message_async
+
+View source
+
+
+send_message_async(
+ content,
+ *,
+ generation_config=None,
+ safety_settings=None,
+ stream=False,
+ tools=None,
+ tool_config=None,
+ request_options=None
+)
+
+
+The async version of ChatSession.send_message
.
+
+
+
+
diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md
new file mode 100644
index 000000000..9b9e7ff6f
--- /dev/null
+++ b/docs/api/google/generativeai/GenerativeModel.md
@@ -0,0 +1,429 @@
+description: The genai.GenerativeModel class wraps default parameters for calls to GenerativeModel.generate_content
, GenerativeModel.count_tokens
, and GenerativeModel.start_chat
.
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.GenerativeModel
+
+
+
+
+
+
+
+The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content
, GenerativeModel.count_tokens
, and GenerativeModel.start_chat
.
+
+
+google.generativeai.GenerativeModel(
+ model_name: str = 'gemini-pro',
+ safety_settings: (safety_types.SafetySettingOptions | None) = None,
+ generation_config: (generation_types.GenerationConfigType | None) = None,
+ tools: (content_types.FunctionLibraryType | None) = None,
+ tool_config: (content_types.ToolConfigType | None) = None,
+ system_instruction: (content_types.ContentType | None) = None
+)
+
+
+
+
+
+
+This family of functionality is designed to support multi-turn conversations, and multimodal
+requests. Which media types are supported for input and output is model-dependent.
+
+```
+>>> import google.generativeai as genai
+>>> import PIL.Image
+>>> genai.configure(api_key='YOUR_API_KEY')
+>>> model = genai.GenerativeModel('models/gemini-pro')
+>>> result = model.generate_content('Tell me a story about a magic backpack')
+>>> result.text
+"In the quaint little town of Lakeside, there lived a young girl named Lily..."
+```
+
+#### Multimodal input:
+
+
+
+```
+>>> model = genai.GenerativeModel('models/gemini-pro')
+>>> result = model.generate_content([
+... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
+>>> result.text
+"**Blueberry Scones** ..."
+```
+
+Multi-turn conversation:
+
+```
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hi, I have some questions for you.")
+>>> response.text
+"Sure, I'll do my best to answer your questions..."
+```
+
+To list the compatible model names use:
+
+```
+>>> for m in genai.list_models():
+... if 'generateContent' in m.supported_generation_methods:
+... print(m.name)
+```
+
+
+
+
+Arguments |
+
+
+
+`model_name`
+ |
+
+The name of the model to query. To list compatible models use genai.list_models.
+ |
+
+
+`safety_settings`
+ |
+
+Sets the default safety filters. This controls which content is blocked
+by the api before being returned.
+ |
+
+
+`generation_config`
+ |
+
+A `genai.GenerationConfig` setting the default generation parameters to
+use.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`cached_content`
+ |
+
+
+ |
+
+
+`model_name`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+count_tokens
+
+View source
+
+
+count_tokens(
+ contents: content_types.ContentsType = None,
+ *,
+ generation_config: (generation_types.GenerationConfigType | None) = None,
+ safety_settings: (safety_types.SafetySettingOptions | None) = None,
+ tools: (content_types.FunctionLibraryType | None) = None,
+ tool_config: (content_types.ToolConfigType | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> protos.CountTokensResponse
+
+
+
+
+
+count_tokens_async
+
+View source
+
+
+count_tokens_async(
+ contents=None,
+ *,
+ generation_config=None,
+ safety_settings=None,
+ tools=None,
+ tool_config=None,
+ request_options=None
+)
+
+
+
+
+
+from_cached_content
+
+View source
+
+
+@classmethod
+from_cached_content(
+ cached_content: (str | caching.CachedContent),
+ *,
+ generation_config: (generation_types.GenerationConfigType | None) = None,
+ safety_settings: (safety_types.SafetySettingOptions | None) = None
+) -> GenerativeModel
+
+
+Creates a model with `cached_content` as model's context.
+
+
+
+
+
+Args |
+
+
+
+`cached_content`
+ |
+
+context for the model.
+ |
+
+
+`generation_config`
+ |
+
+Overrides for the model's generation config.
+ |
+
+
+`safety_settings`
+ |
+
+Overrides for the model's safety settings.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+`GenerativeModel` object with `cached_content` as its context.
+ |
+
+
+
+
+
+
+generate_content
+
+View source
+
+
+generate_content(
+ contents: content_types.ContentsType,
+ *,
+ generation_config: (generation_types.GenerationConfigType | None) = None,
+ safety_settings: (safety_types.SafetySettingOptions | None) = None,
+ stream: bool = False,
+ tools: (content_types.FunctionLibraryType | None) = None,
+ tool_config: (content_types.ToolConfigType | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> generation_types.GenerateContentResponse
+
+
+A multipurpose function to generate responses from the model.
+
+This GenerativeModel.generate_content
method can handle multimodal input, and multi-turn
+conversations.
+
+```
+>>> model = genai.GenerativeModel('models/gemini-pro')
+>>> response = model.generate_content('Tell me a story about a magic backpack')
+>>> response.text
+```
+
+### Streaming
+
+This method supports streaming with the `stream=True` argument. The result has the same type as the non-streaming case,
+but you can iterate over the response chunks as they become available:
+
+```
+>>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)
+>>> for chunk in response:
+... print(chunk.text)
+```
+
+### Multi-turn
+
+This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each
+request. This takes some manual management but gives you complete control:
+
+```
+>>> messages = [{'role':'user', 'parts': ['hello']}]
+>>> response = model.generate_content(messages) # "Hello, how can I help"
+>>> messages.append(response.candidates[0].content)
+>>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})
+>>> response = model.generate_content(messages)
+```
+
+For a simpler multi-turn interface see GenerativeModel.start_chat
.
+
+### Input type flexibility
+
+While the underlying API strictly expects a `list[protos.Content]` objects, this method
+will convert the user input into the correct type. The hierarchy of types that can be
+converted is below. Any of these objects can be passed as an equivalent `dict`.
+
+* `Iterable[protos.Content]`
+* protos.Content
+* `Iterable[protos.Part]`
+* protos.Part
+* `str`, `Image`, or protos.Blob
+
+In an `Iterable[protos.Content]` each `content` is a separate message.
+But note that an `Iterable[protos.Part]` is taken as the parts of a single message.
+
+
+
+
+Arguments |
+
+
+
+`contents`
+ |
+
+The contents serving as the model's prompt.
+ |
+
+
+`generation_config`
+ |
+
+Overrides for the model's generation config.
+ |
+
+
+`safety_settings`
+ |
+
+Overrides for the model's safety settings.
+ |
+
+
+`stream`
+ |
+
+If True, yield response chunks as they are generated.
+ |
+
+
+`tools`
+ |
+
+`protos.Tools` more info coming soon.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+generate_content_async
+
+View source
+
+
+generate_content_async(
+ contents,
+ *,
+ generation_config=None,
+ safety_settings=None,
+ stream=False,
+ tools=None,
+ tool_config=None,
+ request_options=None
+)
+
+
+The async version of GenerativeModel.generate_content
.
+
+
+start_chat
+
+View source
+
+
+start_chat(
+ *,
+ history: (Iterable[content_types.StrictContentType] | None) = None,
+ enable_automatic_function_calling: bool = False
+) -> ChatSession
+
+
+Returns a `genai.ChatSession` attached to this model.
+
+```
+>>> model = genai.GenerativeModel()
+>>> chat = model.start_chat(history=[...])
+>>> response = chat.send_message("Hello?")
+```
+
+
+
+
+Arguments |
+
+
+
+`history`
+ |
+
+An iterable of protos.Content objects, or equivalents to initialize the session.
+ |
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/_api_cache.json b/docs/api/google/generativeai/_api_cache.json
new file mode 100644
index 000000000..a1d446e53
--- /dev/null
+++ b/docs/api/google/generativeai/_api_cache.json
@@ -0,0 +1,9886 @@
+{
+ "duplicate_of": {
+ "google.generativeai.ChatSession.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.ChatSession.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.ChatSession.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.ChatSession.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.ChatSession.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.ChatSession.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.ChatSession.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.GenerationConfig": "google.generativeai.types.GenerationConfig",
+ "google.generativeai.GenerationConfig.__eq__": "google.generativeai.types.GenerationConfig.__eq__",
+ "google.generativeai.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.GenerationConfig.__init__": "google.generativeai.types.GenerationConfig.__init__",
+ "google.generativeai.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.GenerativeModel.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.GenerativeModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.GenerativeModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.GenerativeModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.GenerativeModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.GenerativeModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.GenerativeModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.annotations": "google.generativeai.types.annotations",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.AttributionSourceId.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.AttributionSourceId.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.AttributionSourceId.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.AttributionSourceId.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Blob.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Blob.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Blob.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Blob.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Blob.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Blob.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Blob.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Blob.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CachedContent.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CachedContent.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CachedContent.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CachedContent.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CachedContent.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Candidate.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Candidate.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Candidate.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Candidate.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Candidate.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Chunk.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Chunk.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Chunk.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Chunk.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Chunk.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Chunk.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Chunk.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Chunk.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Chunk.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Chunk.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Chunk.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Chunk.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Chunk.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Chunk.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Chunk.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Chunk.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Chunk.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Chunk.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Chunk.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Chunk.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Chunk.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Chunk.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Chunk.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Chunk.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Chunk.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Chunk.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Chunk.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Chunk.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Chunk.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Chunk.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Chunk.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Chunk.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Chunk.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Chunk.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Chunk.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Chunk.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Chunk.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Chunk.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Chunk.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Chunk.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Chunk.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Chunk.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Chunk.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Chunk.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Chunk.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Chunk.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Chunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Chunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Chunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Chunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Chunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Chunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Chunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Chunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ChunkData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ChunkData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ChunkData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ChunkData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ChunkData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CitationMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CitationMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CitationMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CitationSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CitationSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CitationSource.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CitationSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CitationSource.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CodeExecution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CodeExecution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CodeExecution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.CodeExecutionResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CodeExecutionResult.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CodeExecutionResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CodeExecutionResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Condition.Operator.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Condition.Operator.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Condition.Operator.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Condition.Operator.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Condition.Operator.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Condition.Operator.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Condition.Operator.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Condition.Operator.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Condition.Operator.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Condition.Operator.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Condition.Operator.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Condition.Operator.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Condition.Operator.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Condition.Operator.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Condition.Operator.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Condition.Operator.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Condition.Operator.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Condition.Operator.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Condition.Operator.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Condition.Operator.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Condition.Operator.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Condition.Operator.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Condition.Operator.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Condition.Operator.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Condition.Operator.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Condition.Operator.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Condition.Operator.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Condition.Operator.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Condition.Operator.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Condition.Operator.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Condition.Operator.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Condition.Operator.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Condition.Operator.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Condition.Operator.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Condition.Operator.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Condition.Operator.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Condition.Operator.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Condition.Operator.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Condition.Operator.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Condition.Operator.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Condition.Operator.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Condition.Operator.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Condition.Operator.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Condition.Operator.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Condition.Operator.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Condition.Operator.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Condition.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Condition.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Condition.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Condition.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Condition.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Condition.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Condition.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Condition.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Content.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Content.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Content.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Content.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Content.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Content.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Content.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Content.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ContentEmbedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ContentEmbedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ContentEmbedding.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ContentEmbedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ContentEmbedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ContentFilter.BlockedReason": "google.generativeai.types.BlockedReason",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.ContentFilter.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.ContentFilter.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.ContentFilter.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.ContentFilter.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.ContentFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ContentFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ContentFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ContentFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Corpus.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Corpus.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Corpus.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Corpus.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Corpus.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Corpus.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Corpus.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Corpus.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountMessageTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountMessageTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountMessageTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountMessageTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountMessageTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountMessageTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountTextTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountTextTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountTextTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountTextTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountTextTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountTextTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountTextTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CountTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CountTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CountTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateFileResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateFileResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateFileResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateFileResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreatePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CreateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CreateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CreateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.CustomMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.CustomMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.CustomMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Dataset.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Dataset.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Dataset.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Dataset.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Dataset.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Dataset.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Dataset.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Dataset.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeletePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeletePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeletePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Document.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Document.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Document.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Document.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Document.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Document.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Document.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Document.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.EmbedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.EmbedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.EmbedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.EmbedContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.EmbedContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.EmbedContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.EmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.EmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.EmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.EmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.EmbedTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.EmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.EmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Embedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Embedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Embedding.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Embedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Embedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Embedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Embedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Embedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Example.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Example.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Example.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Example.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Example.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Example.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Example.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Example.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ExecutableCode.Language.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.ExecutableCode.Language.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.ExecutableCode.Language.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.ExecutableCode.Language.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.ExecutableCode.Language.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.ExecutableCode.Language.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.ExecutableCode.Language.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.ExecutableCode.Language.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.ExecutableCode.Language.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.ExecutableCode.Language.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.ExecutableCode.Language.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.ExecutableCode.Language.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.ExecutableCode.Language.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.ExecutableCode.Language.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.ExecutableCode.Language.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.ExecutableCode.Language.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.ExecutableCode.Language.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.ExecutableCode.Language.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.ExecutableCode.Language.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.ExecutableCode.Language.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.ExecutableCode.Language.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.ExecutableCode.Language.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.ExecutableCode.Language.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.ExecutableCode.Language.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.ExecutableCode.Language.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.ExecutableCode.Language.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.ExecutableCode.Language.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.ExecutableCode.Language.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.ExecutableCode.Language.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.ExecutableCode.Language.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.ExecutableCode.Language.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.ExecutableCode.Language.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.ExecutableCode.Language.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.ExecutableCode.Language.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.ExecutableCode.Language.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.ExecutableCode.Language.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.ExecutableCode.Language.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.ExecutableCode.Language.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.ExecutableCode.Language.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.ExecutableCode.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ExecutableCode.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ExecutableCode.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ExecutableCode.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.File.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.File.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.File.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.File.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.File.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.File.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.File.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.File.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.File.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.File.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.File.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.File.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.File.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.File.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.File.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.File.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.File.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.File.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.File.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.File.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.File.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.File.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.File.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.File.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.File.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.File.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.File.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.File.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.File.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.File.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.File.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.File.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.File.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.File.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.File.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.File.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.File.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.File.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.File.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.File.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.File.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.File.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.File.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.File.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.File.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.File.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.File.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.File.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.File.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.File.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.FileData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FileData.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.FileData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.FileData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FileData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.FileData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.FileData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FileData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.FunctionCall.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.FunctionCall.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.FunctionCall.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.FunctionCall.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.FunctionCallingConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.FunctionCallingConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.FunctionCallingConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.FunctionDeclaration.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.FunctionDeclaration.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.FunctionDeclaration.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.FunctionResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.FunctionResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.FunctionResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateAnswerRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateAnswerRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateAnswerRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateMessageRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateMessageRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateMessageRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateMessageResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateMessageResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateMessageResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerateTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerateTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerateTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GenerationConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GenerationConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GenerationConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetPermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetPermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetPermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GetTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GetTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GetTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GroundingAttribution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GroundingAttribution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GroundingAttribution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GroundingAttribution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GroundingPassage.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GroundingPassage.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GroundingPassage.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.GroundingPassages.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.GroundingPassages.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.GroundingPassages.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Hyperparameters.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Hyperparameters.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Hyperparameters.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListCachedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListCachedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCachedContentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListCachedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListCachedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListCachedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListCachedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListCorporaRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListCorporaRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCorporaRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListCorporaRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListCorporaResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListCorporaResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListCorporaResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCorporaResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListDocumentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListDocumentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListDocumentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListDocumentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListDocumentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListDocumentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListDocumentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListFilesRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListFilesRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListFilesRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListFilesResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListFilesResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListFilesResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListFilesResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListPermissionsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListPermissionsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListPermissionsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListPermissionsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListPermissionsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListPermissionsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListPermissionsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListTunedModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListTunedModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListTunedModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListTunedModelsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ListTunedModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ListTunedModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ListTunedModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Message.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Message.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Message.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Message.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Message.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Message.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Message.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Message.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.MessagePrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.MessagePrompt.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.MessagePrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.MessagePrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.MetadataFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.MetadataFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.MetadataFilter.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.MetadataFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Model.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Model.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Model.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Part.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Part.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Part.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Part.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Part.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Part.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Part.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Part.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Permission.GranteeType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Permission.GranteeType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Permission.GranteeType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Permission.GranteeType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Permission.GranteeType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Permission.GranteeType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Permission.GranteeType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Permission.GranteeType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Permission.GranteeType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Permission.GranteeType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Permission.GranteeType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Permission.GranteeType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Permission.GranteeType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Permission.GranteeType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Permission.GranteeType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Permission.GranteeType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Permission.GranteeType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Permission.GranteeType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Permission.GranteeType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Permission.GranteeType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Permission.GranteeType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Permission.GranteeType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Permission.GranteeType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Permission.GranteeType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Permission.GranteeType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Permission.GranteeType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Permission.GranteeType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Permission.GranteeType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Permission.GranteeType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Permission.GranteeType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Permission.GranteeType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Permission.GranteeType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Permission.GranteeType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Permission.GranteeType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Permission.GranteeType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Permission.GranteeType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Permission.GranteeType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Permission.GranteeType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Permission.GranteeType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Permission.GranteeType.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Permission.GranteeType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Permission.Role.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Permission.Role.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Permission.Role.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Permission.Role.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Permission.Role.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Permission.Role.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Permission.Role.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Permission.Role.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Permission.Role.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Permission.Role.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Permission.Role.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Permission.Role.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Permission.Role.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Permission.Role.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Permission.Role.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Permission.Role.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Permission.Role.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Permission.Role.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Permission.Role.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Permission.Role.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Permission.Role.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Permission.Role.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Permission.Role.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Permission.Role.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Permission.Role.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Permission.Role.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Permission.Role.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Permission.Role.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Permission.Role.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Permission.Role.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Permission.Role.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Permission.Role.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Permission.Role.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Permission.Role.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Permission.Role.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Permission.Role.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Permission.Role.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Permission.Role.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Permission.Role.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Permission.Role.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Permission.Role.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Permission.Role.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Permission.Role.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Permission.Role.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Permission.Role.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Permission.Role.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Permission.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Permission.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Permission.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Permission.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.QueryCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.QueryCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.QueryCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.QueryCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.QueryCorpusResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.QueryCorpusResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.QueryCorpusResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.QueryDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.QueryDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.QueryDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.QueryDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.QueryDocumentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.QueryDocumentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.QueryDocumentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.RelevantChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.RelevantChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.RelevantChunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.RelevantChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.SafetyFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.SafetyFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.SafetyFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.SafetyRating.HarmProbability": "google.generativeai.types.HarmProbability",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.SafetyRating.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.SafetyRating.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.SafetyRating.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.SafetyRating.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.SafetyRating.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetyRating.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.SafetyRating.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.SafetyRating.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold": "google.generativeai.types.HarmBlockThreshold",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.SafetySetting.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.SafetySetting.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.SafetySetting.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.SafetySetting.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetySetting.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Schema.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Schema.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Schema.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Schema.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Schema.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Schema.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Schema.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Schema.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.StringList.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.StringList.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.StringList.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.StringList.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.StringList.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.StringList.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.StringList.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.StringList.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TaskType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.TaskType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.TaskType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.TaskType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.TaskType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.TaskType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.TaskType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.TaskType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.TaskType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.TaskType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.TaskType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.TaskType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.TaskType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.TaskType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.TaskType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.TaskType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.TaskType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.TaskType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.TaskType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.TaskType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.TaskType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.TaskType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.TaskType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.TaskType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.TaskType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.TaskType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.TaskType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.TaskType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.TaskType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.TaskType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.TaskType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.TaskType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.TaskType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.TaskType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.TaskType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.TaskType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.TaskType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.TaskType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.TaskType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.TaskType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.TaskType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.TaskType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.TaskType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.TaskType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.TaskType.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.TaskType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.TextCompletion.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TextCompletion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TextCompletion.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TextCompletion.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TextPrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TextPrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TextPrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TextPrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Tool.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.Tool.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.Tool.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.ToolConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.ToolConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.ToolConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TransferOwnershipRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TransferOwnershipRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TransferOwnershipRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TransferOwnershipResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TransferOwnershipResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TransferOwnershipResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TunedModel.State": "google.generativeai.types.TunedModelState",
+ "google.generativeai.protos.TunedModel.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.TunedModel.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.TunedModel.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.TunedModel.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.TunedModel.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.TunedModel.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.TunedModel.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.TunedModel.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.TunedModel.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.TunedModel.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.TunedModel.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.TunedModel.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.TunedModel.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.TunedModel.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.TunedModel.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.TunedModel.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.TunedModel.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.TunedModel.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.TunedModel.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.TunedModel.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.TunedModel.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.TunedModel.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.TunedModel.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.TunedModel.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.TunedModel.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.TunedModel.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.TunedModel.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.TunedModel.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.TunedModel.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.TunedModel.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.TunedModel.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.TunedModel.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.TunedModel.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.TunedModel.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.TunedModel.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.TunedModel.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.TunedModel.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.TunedModel.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.TunedModel.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.TunedModel.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.TunedModel.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.TunedModel.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.TunedModel.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.TunedModel.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.TunedModel.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.TunedModel.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.TunedModel.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TunedModel.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TunedModel.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TunedModelSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TunedModelSource.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TunedModelSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TunedModelSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TunedModelSource.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TuningExample.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TuningExample.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TuningExample.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningExample.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TuningExample.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningExample.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TuningExamples.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TuningExamples.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TuningExamples.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TuningExamples.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TuningSnapshot.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TuningSnapshot.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TuningSnapshot.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.TuningTask.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.TuningTask.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningTask.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.TuningTask.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.TuningTask.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningTask.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.Type.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Type.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Type.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Type.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Type.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Type.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Type.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Type.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Type.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Type.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Type.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Type.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Type.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Type.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Type.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Type.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Type.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Type.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Type.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Type.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Type.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Type.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Type.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Type.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Type.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Type.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Type.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Type.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Type.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Type.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Type.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Type.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Type.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Type.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Type.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Type.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Type.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Type.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Type.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Type.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Type.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Type.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Type.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Type.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Type.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Type.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.UpdateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.UpdateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.UpdateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdateCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.UpdateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.UpdatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.protos.VideoMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.protos.VideoMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.VideoMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.protos.VideoMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.AuthorError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.AuthorError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.AuthorError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.AuthorError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.AuthorError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.AuthorError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.BlockedPromptException.__init__": "google.generativeai.types.AuthorError.__init__",
+ "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.BlockedPromptException.__new__": "google.generativeai.types.AuthorError.__new__",
+ "google.generativeai.types.BlockedPromptException.add_note": "google.generativeai.types.AuthorError.add_note",
+ "google.generativeai.types.BlockedPromptException.args": "google.generativeai.types.AuthorError.args",
+ "google.generativeai.types.BlockedPromptException.with_traceback": "google.generativeai.types.AuthorError.with_traceback",
+ "google.generativeai.types.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.BlockedReason.__contains__": "google.generativeai.protos.ContentFilter.BlockedReason.__contains__",
+ "google.generativeai.types.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.BlockedReason.__getitem__": "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__",
+ "google.generativeai.types.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.BlockedReason.__iter__": "google.generativeai.protos.ContentFilter.BlockedReason.__iter__",
+ "google.generativeai.types.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.BlockedReason.__len__": "google.generativeai.protos.ContentFilter.BlockedReason.__len__",
+ "google.generativeai.types.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.BlockedReason.from_bytes": "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes",
+ "google.generativeai.types.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.AuthorError.__init__",
+ "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.AuthorError.__new__",
+ "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.AuthorError.add_note",
+ "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.AuthorError.args",
+ "google.generativeai.types.BrokenResponseError.with_traceback": "google.generativeai.types.AuthorError.with_traceback",
+ "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.CallableFunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.CallableFunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.CallableFunctionDeclaration.description": "google.generativeai.types.FunctionDeclaration.description",
+ "google.generativeai.types.CallableFunctionDeclaration.from_function": "google.generativeai.types.FunctionDeclaration.from_function",
+ "google.generativeai.types.CallableFunctionDeclaration.name": "google.generativeai.types.FunctionDeclaration.name",
+ "google.generativeai.types.CallableFunctionDeclaration.parameters": "google.generativeai.types.FunctionDeclaration.parameters",
+ "google.generativeai.types.CallableFunctionDeclaration.to_proto": "google.generativeai.types.FunctionDeclaration.to_proto",
+ "google.generativeai.types.ChatResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.ChatResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.ChatResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.ChatResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.ChatResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.ChatResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.CitationMetadataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.CitationMetadataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.CitationMetadataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.CitationMetadataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.CitationMetadataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.CitationMetadataDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.CitationMetadataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.CitationMetadataDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.CitationMetadataDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.CitationMetadataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.CitationMetadataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.CitationMetadataDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.CitationMetadataDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.CitationMetadataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.CitationMetadataDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.CitationMetadataDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.CitationMetadataDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.CitationMetadataDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.CitationMetadataDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.CitationMetadataDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.CitationMetadataDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.CitationMetadataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.CitationMetadataDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.CitationMetadataDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.CitationSourceDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.CitationSourceDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.CitationSourceDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.CitationSourceDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.CitationSourceDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.CitationSourceDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.CitationSourceDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.CitationSourceDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.CitationSourceDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.CitationSourceDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.CitationSourceDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.CitationSourceDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.CitationSourceDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.CitationSourceDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.CitationSourceDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.CitationSourceDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.CitationSourceDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.CitationSourceDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.CitationSourceDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.CitationSourceDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.CitationSourceDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.CitationSourceDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.CitationSourceDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.CitationSourceDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.Completion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.Completion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.Completion.__init__": "google.generativeai.types.ChatResponse.__init__",
+ "google.generativeai.types.Completion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.Completion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.Completion.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.Completion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.ContentDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ContentDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ContentDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ContentDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ContentDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ContentDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ContentDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ContentDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ContentDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ContentDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ContentDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ContentDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ContentDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ContentDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ContentDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ContentDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ContentDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ContentDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ContentDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ContentDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ContentDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ContentDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ContentDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ContentDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.ContentFilterDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ContentFilterDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ContentFilterDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ContentFilterDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ContentFilterDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ContentFilterDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ContentFilterDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ContentFilterDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ContentFilterDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ContentFilterDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ContentFilterDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ContentFilterDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ContentFilterDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ContentFilterDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ContentFilterDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ContentFilterDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ContentFilterDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ContentFilterDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ContentFilterDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ContentFilterDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ContentFilterDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ContentFilterDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ContentFilterDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ContentFilterDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.ExampleDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ExampleDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ExampleDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ExampleDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ExampleDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ExampleDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ExampleDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ExampleDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ExampleDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ExampleDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ExampleDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ExampleDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ExampleDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ExampleDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ExampleDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ExampleDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ExampleDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ExampleDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ExampleDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ExampleDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ExampleDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ExampleDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ExampleDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ExampleDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.File.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.File.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.File.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.FileDataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.FileDataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.FileDataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.FileDataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.FileDataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.FileDataDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.FileDataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.FileDataDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.FileDataDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.FileDataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.FileDataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.FileDataDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.FileDataDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.FileDataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.FileDataDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.FileDataDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.FileDataDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.FileDataDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.FileDataDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.FileDataDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.FileDataDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.FileDataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.FileDataDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.FileDataDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.FunctionLibrary.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.GenerateContentResponse.__init__": "google.generativeai.types.AsyncGenerateContentResponse.__init__",
+ "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.GenerateContentResponse.candidates": "google.generativeai.types.AsyncGenerateContentResponse.candidates",
+ "google.generativeai.types.GenerateContentResponse.parts": "google.generativeai.types.AsyncGenerateContentResponse.parts",
+ "google.generativeai.types.GenerateContentResponse.prompt_feedback": "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback",
+ "google.generativeai.types.GenerateContentResponse.text": "google.generativeai.types.AsyncGenerateContentResponse.text",
+ "google.generativeai.types.GenerateContentResponse.to_dict": "google.generativeai.types.AsyncGenerateContentResponse.to_dict",
+ "google.generativeai.types.GenerateContentResponse.usage_metadata": "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata",
+ "google.generativeai.types.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.GenerationConfigDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.GenerationConfigDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.GenerationConfigDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.GenerationConfigDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.GenerationConfigDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.GenerationConfigDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.GenerationConfigDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.GenerationConfigDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.GenerationConfigDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.GenerationConfigDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.GenerationConfigDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.GenerationConfigDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.GenerationConfigDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.GenerationConfigDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.GenerationConfigDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.GenerationConfigDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.GenerationConfigDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.GenerationConfigDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.GenerationConfigDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.GenerationConfigDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.GenerationConfigDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.GenerationConfigDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.GenerationConfigDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.GenerationConfigDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmBlockThreshold.__contains__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__",
+ "google.generativeai.types.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__",
+ "google.generativeai.types.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmBlockThreshold.__iter__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__",
+ "google.generativeai.types.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmBlockThreshold.__len__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__",
+ "google.generativeai.types.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmBlockThreshold.from_bytes": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes",
+ "google.generativeai.types.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmProbability.__contains__": "google.generativeai.protos.SafetyRating.HarmProbability.__contains__",
+ "google.generativeai.types.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmProbability.__getitem__": "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__",
+ "google.generativeai.types.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmProbability.__iter__": "google.generativeai.protos.SafetyRating.HarmProbability.__iter__",
+ "google.generativeai.types.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmProbability.__len__": "google.generativeai.protos.SafetyRating.HarmProbability.__len__",
+ "google.generativeai.types.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmProbability.from_bytes": "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes",
+ "google.generativeai.types.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.AuthorError.__init__",
+ "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.IncompleteIterationError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.AuthorError.__new__",
+ "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.AuthorError.add_note",
+ "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.AuthorError.args",
+ "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.AuthorError.with_traceback",
+ "google.generativeai.types.MessageDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.MessageDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.MessageDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.MessageDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.MessageDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.MessageDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.MessageDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.MessageDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.MessageDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.MessageDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.MessageDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.MessageDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.MessageDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.MessageDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.MessageDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.MessageDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.MessageDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.MessageDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.MessageDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.MessageDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.MessageDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.MessageDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.MessageDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.MessageDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.MessagePromptDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.MessagePromptDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.MessagePromptDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.MessagePromptDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.MessagePromptDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.MessagePromptDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.MessagePromptDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.MessagePromptDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.MessagePromptDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.MessagePromptDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.MessagePromptDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.MessagePromptDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.MessagePromptDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.MessagePromptDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.MessagePromptDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.MessagePromptDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.MessagePromptDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.MessagePromptDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.MessagePromptDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.MessagePromptDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.MessagePromptDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.MessagePromptDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.MessagePromptDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.MessagePromptDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.Model.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.ModelNameOptions": "google.generativeai.types.AnyModelNameOptions",
+ "google.generativeai.types.PartDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.PartDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.PartDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.PartDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.PartDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.PartDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.PartDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.PartDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.PartDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.PartDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.PartDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.PartDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.PartDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.PartDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.PartDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.PartDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.PartDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.PartDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.PartDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.PartDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.PartDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.PartDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.PartDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.PartDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.Permission.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.Permission.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.Permissions.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.Permissions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.Permissions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.Permissions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.Permissions.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.Permissions.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.Permissions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.RequestOptions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.RequestOptions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.ResponseDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ResponseDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ResponseDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ResponseDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ResponseDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ResponseDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ResponseDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ResponseDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ResponseDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ResponseDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ResponseDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ResponseDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ResponseDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ResponseDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ResponseDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ResponseDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ResponseDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ResponseDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ResponseDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ResponseDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ResponseDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ResponseDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ResponseDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ResponseDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.SafetyFeedbackDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetyFeedbackDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetyFeedbackDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetyFeedbackDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetyFeedbackDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetyFeedbackDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetyFeedbackDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetyFeedbackDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetyFeedbackDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetyFeedbackDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetyFeedbackDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetyFeedbackDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetyFeedbackDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetyFeedbackDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetyFeedbackDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetyFeedbackDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetyFeedbackDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetyFeedbackDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetyFeedbackDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetyFeedbackDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetyFeedbackDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetyFeedbackDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetyFeedbackDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetyFeedbackDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.SafetyRatingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetyRatingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetyRatingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetyRatingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetyRatingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetyRatingDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetyRatingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetyRatingDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetyRatingDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetyRatingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetyRatingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetyRatingDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetyRatingDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetyRatingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetyRatingDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetyRatingDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetyRatingDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetyRatingDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetyRatingDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetyRatingDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetyRatingDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetyRatingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetyRatingDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetyRatingDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.SafetySettingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetySettingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetySettingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetySettingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetySettingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetySettingDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetySettingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetySettingDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetySettingDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetySettingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetySettingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetySettingDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetySettingDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetySettingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetySettingDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetySettingDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetySettingDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetySettingDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetySettingDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetySettingDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetySettingDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetySettingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetySettingDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetySettingDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.AuthorError.__init__",
+ "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.AuthorError.__new__",
+ "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.AuthorError.add_note",
+ "google.generativeai.types.StopCandidateException.args": "google.generativeai.types.AuthorError.args",
+ "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.AuthorError.with_traceback",
+ "google.generativeai.types.Tool.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__",
+ "google.generativeai.types.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.Tool.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.ToolDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ToolDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ToolDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ToolDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ToolDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ToolDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ToolDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ToolDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ToolDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ToolDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ToolDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ToolDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ToolDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ToolDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ToolDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ToolDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ToolDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ToolDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ToolDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ToolDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ToolDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ToolDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ToolDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ToolDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__",
+ "google.generativeai.types.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__",
+ "google.generativeai.types.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__",
+ "google.generativeai.types.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__",
+ "google.generativeai.types.TunedModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__",
+ "google.generativeai.types.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__",
+ "google.generativeai.types.TunedModelState.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.TunedModelState.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.TunedModelState.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.TunedModelState.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.TunedModelState.__contains__": "google.generativeai.protos.TunedModel.State.__contains__",
+ "google.generativeai.types.TunedModelState.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.TunedModelState.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.TunedModelState.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.TunedModelState.__getitem__": "google.generativeai.protos.TunedModel.State.__getitem__",
+ "google.generativeai.types.TunedModelState.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.TunedModelState.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.TunedModelState.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.TunedModelState.__iter__": "google.generativeai.protos.TunedModel.State.__iter__",
+ "google.generativeai.types.TunedModelState.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.TunedModelState.__len__": "google.generativeai.protos.TunedModel.State.__len__",
+ "google.generativeai.types.TunedModelState.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.TunedModelState.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.TunedModelState.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.TunedModelState.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.TunedModelState.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.TunedModelState.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.TunedModelState.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.TunedModelState.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.TunedModelState.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.TunedModelState.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.TunedModelState.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.TunedModelState.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.TunedModelState.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.TunedModelState.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.TunedModelState.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.TunedModelState.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.TunedModelState.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.TunedModelState.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.TunedModelState.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.TunedModelState.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.TunedModelState.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.TunedModelState.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.TunedModelState.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.TunedModelState.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.TunedModelState.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.TunedModelState.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.TunedModelState.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.TunedModelState.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.TunedModelState.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.TunedModelState.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.TunedModelState.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.TunedModelState.from_bytes": "google.generativeai.protos.TunedModel.State.from_bytes",
+ "google.generativeai.types.TunedModelState.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.TunedModelState.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.TunedModelState.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes"
+ },
+ "is_fragment": {
+ "google.generativeai": false,
+ "google.generativeai.ChatSession": false,
+ "google.generativeai.ChatSession.__eq__": true,
+ "google.generativeai.ChatSession.__ge__": true,
+ "google.generativeai.ChatSession.__gt__": true,
+ "google.generativeai.ChatSession.__init__": true,
+ "google.generativeai.ChatSession.__le__": true,
+ "google.generativeai.ChatSession.__lt__": true,
+ "google.generativeai.ChatSession.__ne__": true,
+ "google.generativeai.ChatSession.__new__": true,
+ "google.generativeai.ChatSession.history": true,
+ "google.generativeai.ChatSession.last": true,
+ "google.generativeai.ChatSession.rewind": true,
+ "google.generativeai.ChatSession.send_message": true,
+ "google.generativeai.ChatSession.send_message_async": true,
+ "google.generativeai.GenerationConfig": false,
+ "google.generativeai.GenerationConfig.__eq__": true,
+ "google.generativeai.GenerationConfig.__ge__": true,
+ "google.generativeai.GenerationConfig.__gt__": true,
+ "google.generativeai.GenerationConfig.__init__": true,
+ "google.generativeai.GenerationConfig.__le__": true,
+ "google.generativeai.GenerationConfig.__lt__": true,
+ "google.generativeai.GenerationConfig.__ne__": true,
+ "google.generativeai.GenerationConfig.__new__": true,
+ "google.generativeai.GenerationConfig.candidate_count": true,
+ "google.generativeai.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.GenerationConfig.response_mime_type": true,
+ "google.generativeai.GenerationConfig.response_schema": true,
+ "google.generativeai.GenerationConfig.stop_sequences": true,
+ "google.generativeai.GenerationConfig.temperature": true,
+ "google.generativeai.GenerationConfig.top_k": true,
+ "google.generativeai.GenerationConfig.top_p": true,
+ "google.generativeai.GenerativeModel": false,
+ "google.generativeai.GenerativeModel.__eq__": true,
+ "google.generativeai.GenerativeModel.__ge__": true,
+ "google.generativeai.GenerativeModel.__gt__": true,
+ "google.generativeai.GenerativeModel.__init__": true,
+ "google.generativeai.GenerativeModel.__le__": true,
+ "google.generativeai.GenerativeModel.__lt__": true,
+ "google.generativeai.GenerativeModel.__ne__": true,
+ "google.generativeai.GenerativeModel.__new__": true,
+ "google.generativeai.GenerativeModel.cached_content": true,
+ "google.generativeai.GenerativeModel.count_tokens": true,
+ "google.generativeai.GenerativeModel.count_tokens_async": true,
+ "google.generativeai.GenerativeModel.from_cached_content": true,
+ "google.generativeai.GenerativeModel.generate_content": true,
+ "google.generativeai.GenerativeModel.generate_content_async": true,
+ "google.generativeai.GenerativeModel.model_name": true,
+ "google.generativeai.GenerativeModel.start_chat": true,
+ "google.generativeai.__version__": true,
+ "google.generativeai.annotations": true,
+ "google.generativeai.chat": false,
+ "google.generativeai.chat_async": false,
+ "google.generativeai.configure": false,
+ "google.generativeai.count_message_tokens": false,
+ "google.generativeai.count_text_tokens": false,
+ "google.generativeai.create_tuned_model": false,
+ "google.generativeai.delete_file": false,
+ "google.generativeai.delete_tuned_model": false,
+ "google.generativeai.embed_content": false,
+ "google.generativeai.embed_content_async": false,
+ "google.generativeai.generate_embeddings": false,
+ "google.generativeai.generate_text": false,
+ "google.generativeai.get_base_model": false,
+ "google.generativeai.get_file": false,
+ "google.generativeai.get_model": false,
+ "google.generativeai.get_operation": false,
+ "google.generativeai.get_tuned_model": false,
+ "google.generativeai.list_files": false,
+ "google.generativeai.list_models": false,
+ "google.generativeai.list_operations": false,
+ "google.generativeai.list_tuned_models": false,
+ "google.generativeai.protos": false,
+ "google.generativeai.protos.AttributionSourceId": false,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId": false,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.mro": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.part_index": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.passage_id": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": false,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.chunk": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.mro": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.source": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": true,
+ "google.generativeai.protos.AttributionSourceId.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.grounding_passage": true,
+ "google.generativeai.protos.AttributionSourceId.mro": true,
+ "google.generativeai.protos.AttributionSourceId.pb": true,
+ "google.generativeai.protos.AttributionSourceId.semantic_retriever_chunk": true,
+ "google.generativeai.protos.AttributionSourceId.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.wrap": true,
+ "google.generativeai.protos.BatchCreateChunksRequest": false,
+ "google.generativeai.protos.BatchCreateChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.mro": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.parent": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.pb": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.requests": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchCreateChunksResponse": false,
+ "google.generativeai.protos.BatchCreateChunksResponse.__call__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__eq__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ge__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__gt__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__init__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__le__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__lt__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ne__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__new__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__or__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ror__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.chunks": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.copy_from": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.deserialize": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.from_json": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.mro": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.pb": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.serialize": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.to_dict": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.to_json": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.wrap": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest": false,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.mro": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.parent": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.pb": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.requests": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest": false,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__call__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__init__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__le__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__new__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__or__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ror__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.from_json": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.model": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.mro": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.pb": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.requests": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.serialize": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_json": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse": false,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__call__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__init__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__le__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__new__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__or__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ror__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.embeddings": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.from_json": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.mro": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.pb": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.serialize": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_json": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.wrap": true,
+ "google.generativeai.protos.BatchEmbedTextRequest": false,
+ "google.generativeai.protos.BatchEmbedTextRequest.__call__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__eq__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ge__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__gt__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__init__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__le__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__lt__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ne__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__new__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__or__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ror__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.copy_from": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.deserialize": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.from_json": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.model": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.mro": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.pb": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.requests": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.serialize": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.texts": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.to_dict": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.to_json": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedTextResponse": false,
+ "google.generativeai.protos.BatchEmbedTextResponse.__call__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__eq__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ge__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__gt__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__init__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__le__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__lt__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ne__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__new__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__or__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ror__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.copy_from": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.deserialize": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.embeddings": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.from_json": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.mro": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.pb": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.serialize": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.to_dict": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.to_json": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.wrap": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest": false,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.mro": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.parent": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.pb": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.requests": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse": false,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__call__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__init__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__le__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__new__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__or__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ror__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.chunks": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.from_json": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.mro": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.pb": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.serialize": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_json": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.wrap": true,
+ "google.generativeai.protos.Blob": false,
+ "google.generativeai.protos.Blob.__call__": true,
+ "google.generativeai.protos.Blob.__eq__": true,
+ "google.generativeai.protos.Blob.__ge__": true,
+ "google.generativeai.protos.Blob.__gt__": true,
+ "google.generativeai.protos.Blob.__init__": true,
+ "google.generativeai.protos.Blob.__le__": true,
+ "google.generativeai.protos.Blob.__lt__": true,
+ "google.generativeai.protos.Blob.__ne__": true,
+ "google.generativeai.protos.Blob.__new__": true,
+ "google.generativeai.protos.Blob.__or__": true,
+ "google.generativeai.protos.Blob.__ror__": true,
+ "google.generativeai.protos.Blob.copy_from": true,
+ "google.generativeai.protos.Blob.data": true,
+ "google.generativeai.protos.Blob.deserialize": true,
+ "google.generativeai.protos.Blob.from_json": true,
+ "google.generativeai.protos.Blob.mime_type": true,
+ "google.generativeai.protos.Blob.mro": true,
+ "google.generativeai.protos.Blob.pb": true,
+ "google.generativeai.protos.Blob.serialize": true,
+ "google.generativeai.protos.Blob.to_dict": true,
+ "google.generativeai.protos.Blob.to_json": true,
+ "google.generativeai.protos.Blob.wrap": true,
+ "google.generativeai.protos.CachedContent": false,
+ "google.generativeai.protos.CachedContent.UsageMetadata": false,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__call__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__init__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__le__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__new__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__or__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ror__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.from_json": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.mro": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.pb": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.serialize": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_json": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.total_token_count": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.wrap": true,
+ "google.generativeai.protos.CachedContent.__call__": true,
+ "google.generativeai.protos.CachedContent.__eq__": true,
+ "google.generativeai.protos.CachedContent.__ge__": true,
+ "google.generativeai.protos.CachedContent.__gt__": true,
+ "google.generativeai.protos.CachedContent.__init__": true,
+ "google.generativeai.protos.CachedContent.__le__": true,
+ "google.generativeai.protos.CachedContent.__lt__": true,
+ "google.generativeai.protos.CachedContent.__ne__": true,
+ "google.generativeai.protos.CachedContent.__new__": true,
+ "google.generativeai.protos.CachedContent.__or__": true,
+ "google.generativeai.protos.CachedContent.__ror__": true,
+ "google.generativeai.protos.CachedContent.contents": true,
+ "google.generativeai.protos.CachedContent.copy_from": true,
+ "google.generativeai.protos.CachedContent.create_time": true,
+ "google.generativeai.protos.CachedContent.deserialize": true,
+ "google.generativeai.protos.CachedContent.display_name": true,
+ "google.generativeai.protos.CachedContent.expire_time": true,
+ "google.generativeai.protos.CachedContent.from_json": true,
+ "google.generativeai.protos.CachedContent.model": true,
+ "google.generativeai.protos.CachedContent.mro": true,
+ "google.generativeai.protos.CachedContent.name": true,
+ "google.generativeai.protos.CachedContent.pb": true,
+ "google.generativeai.protos.CachedContent.serialize": true,
+ "google.generativeai.protos.CachedContent.system_instruction": true,
+ "google.generativeai.protos.CachedContent.to_dict": true,
+ "google.generativeai.protos.CachedContent.to_json": true,
+ "google.generativeai.protos.CachedContent.tool_config": true,
+ "google.generativeai.protos.CachedContent.tools": true,
+ "google.generativeai.protos.CachedContent.ttl": true,
+ "google.generativeai.protos.CachedContent.update_time": true,
+ "google.generativeai.protos.CachedContent.usage_metadata": true,
+ "google.generativeai.protos.CachedContent.wrap": true,
+ "google.generativeai.protos.Candidate": false,
+ "google.generativeai.protos.Candidate.FinishReason": false,
+ "google.generativeai.protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.Candidate.FinishReason.MAX_TOKENS": true,
+ "google.generativeai.protos.Candidate.FinishReason.OTHER": true,
+ "google.generativeai.protos.Candidate.FinishReason.RECITATION": true,
+ "google.generativeai.protos.Candidate.FinishReason.SAFETY": true,
+ "google.generativeai.protos.Candidate.FinishReason.STOP": true,
+ "google.generativeai.protos.Candidate.FinishReason.__abs__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__add__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__and__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__bool__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__contains__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__eq__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__floordiv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ge__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__getitem__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__gt__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__init__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__invert__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__iter__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__le__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__len__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__lshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__lt__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__mod__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__mul__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ne__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__neg__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__new__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__or__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__pos__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__pow__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__radd__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rand__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rlshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rmod__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rmul__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ror__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rpow__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rrshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rsub__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rtruediv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rxor__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__sub__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__truediv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__xor__": true,
+ "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio": true,
+ "google.generativeai.protos.Candidate.FinishReason.bit_count": true,
+ "google.generativeai.protos.Candidate.FinishReason.bit_length": true,
+ "google.generativeai.protos.Candidate.FinishReason.conjugate": true,
+ "google.generativeai.protos.Candidate.FinishReason.denominator": true,
+ "google.generativeai.protos.Candidate.FinishReason.from_bytes": true,
+ "google.generativeai.protos.Candidate.FinishReason.imag": true,
+ "google.generativeai.protos.Candidate.FinishReason.numerator": true,
+ "google.generativeai.protos.Candidate.FinishReason.real": true,
+ "google.generativeai.protos.Candidate.FinishReason.to_bytes": true,
+ "google.generativeai.protos.Candidate.__call__": true,
+ "google.generativeai.protos.Candidate.__eq__": true,
+ "google.generativeai.protos.Candidate.__ge__": true,
+ "google.generativeai.protos.Candidate.__gt__": true,
+ "google.generativeai.protos.Candidate.__init__": true,
+ "google.generativeai.protos.Candidate.__le__": true,
+ "google.generativeai.protos.Candidate.__lt__": true,
+ "google.generativeai.protos.Candidate.__ne__": true,
+ "google.generativeai.protos.Candidate.__new__": true,
+ "google.generativeai.protos.Candidate.__or__": true,
+ "google.generativeai.protos.Candidate.__ror__": true,
+ "google.generativeai.protos.Candidate.citation_metadata": true,
+ "google.generativeai.protos.Candidate.content": true,
+ "google.generativeai.protos.Candidate.copy_from": true,
+ "google.generativeai.protos.Candidate.deserialize": true,
+ "google.generativeai.protos.Candidate.finish_reason": true,
+ "google.generativeai.protos.Candidate.from_json": true,
+ "google.generativeai.protos.Candidate.grounding_attributions": true,
+ "google.generativeai.protos.Candidate.index": true,
+ "google.generativeai.protos.Candidate.mro": true,
+ "google.generativeai.protos.Candidate.pb": true,
+ "google.generativeai.protos.Candidate.safety_ratings": true,
+ "google.generativeai.protos.Candidate.serialize": true,
+ "google.generativeai.protos.Candidate.to_dict": true,
+ "google.generativeai.protos.Candidate.to_json": true,
+ "google.generativeai.protos.Candidate.token_count": true,
+ "google.generativeai.protos.Candidate.wrap": true,
+ "google.generativeai.protos.Chunk": false,
+ "google.generativeai.protos.Chunk.State": false,
+ "google.generativeai.protos.Chunk.State.STATE_ACTIVE": true,
+ "google.generativeai.protos.Chunk.State.STATE_FAILED": true,
+ "google.generativeai.protos.Chunk.State.STATE_PENDING_PROCESSING": true,
+ "google.generativeai.protos.Chunk.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.Chunk.State.__abs__": true,
+ "google.generativeai.protos.Chunk.State.__add__": true,
+ "google.generativeai.protos.Chunk.State.__and__": true,
+ "google.generativeai.protos.Chunk.State.__bool__": true,
+ "google.generativeai.protos.Chunk.State.__contains__": true,
+ "google.generativeai.protos.Chunk.State.__eq__": true,
+ "google.generativeai.protos.Chunk.State.__floordiv__": true,
+ "google.generativeai.protos.Chunk.State.__ge__": true,
+ "google.generativeai.protos.Chunk.State.__getitem__": true,
+ "google.generativeai.protos.Chunk.State.__gt__": true,
+ "google.generativeai.protos.Chunk.State.__init__": true,
+ "google.generativeai.protos.Chunk.State.__invert__": true,
+ "google.generativeai.protos.Chunk.State.__iter__": true,
+ "google.generativeai.protos.Chunk.State.__le__": true,
+ "google.generativeai.protos.Chunk.State.__len__": true,
+ "google.generativeai.protos.Chunk.State.__lshift__": true,
+ "google.generativeai.protos.Chunk.State.__lt__": true,
+ "google.generativeai.protos.Chunk.State.__mod__": true,
+ "google.generativeai.protos.Chunk.State.__mul__": true,
+ "google.generativeai.protos.Chunk.State.__ne__": true,
+ "google.generativeai.protos.Chunk.State.__neg__": true,
+ "google.generativeai.protos.Chunk.State.__new__": true,
+ "google.generativeai.protos.Chunk.State.__or__": true,
+ "google.generativeai.protos.Chunk.State.__pos__": true,
+ "google.generativeai.protos.Chunk.State.__pow__": true,
+ "google.generativeai.protos.Chunk.State.__radd__": true,
+ "google.generativeai.protos.Chunk.State.__rand__": true,
+ "google.generativeai.protos.Chunk.State.__rfloordiv__": true,
+ "google.generativeai.protos.Chunk.State.__rlshift__": true,
+ "google.generativeai.protos.Chunk.State.__rmod__": true,
+ "google.generativeai.protos.Chunk.State.__rmul__": true,
+ "google.generativeai.protos.Chunk.State.__ror__": true,
+ "google.generativeai.protos.Chunk.State.__rpow__": true,
+ "google.generativeai.protos.Chunk.State.__rrshift__": true,
+ "google.generativeai.protos.Chunk.State.__rshift__": true,
+ "google.generativeai.protos.Chunk.State.__rsub__": true,
+ "google.generativeai.protos.Chunk.State.__rtruediv__": true,
+ "google.generativeai.protos.Chunk.State.__rxor__": true,
+ "google.generativeai.protos.Chunk.State.__sub__": true,
+ "google.generativeai.protos.Chunk.State.__truediv__": true,
+ "google.generativeai.protos.Chunk.State.__xor__": true,
+ "google.generativeai.protos.Chunk.State.as_integer_ratio": true,
+ "google.generativeai.protos.Chunk.State.bit_count": true,
+ "google.generativeai.protos.Chunk.State.bit_length": true,
+ "google.generativeai.protos.Chunk.State.conjugate": true,
+ "google.generativeai.protos.Chunk.State.denominator": true,
+ "google.generativeai.protos.Chunk.State.from_bytes": true,
+ "google.generativeai.protos.Chunk.State.imag": true,
+ "google.generativeai.protos.Chunk.State.numerator": true,
+ "google.generativeai.protos.Chunk.State.real": true,
+ "google.generativeai.protos.Chunk.State.to_bytes": true,
+ "google.generativeai.protos.Chunk.__call__": true,
+ "google.generativeai.protos.Chunk.__eq__": true,
+ "google.generativeai.protos.Chunk.__ge__": true,
+ "google.generativeai.protos.Chunk.__gt__": true,
+ "google.generativeai.protos.Chunk.__init__": true,
+ "google.generativeai.protos.Chunk.__le__": true,
+ "google.generativeai.protos.Chunk.__lt__": true,
+ "google.generativeai.protos.Chunk.__ne__": true,
+ "google.generativeai.protos.Chunk.__new__": true,
+ "google.generativeai.protos.Chunk.__or__": true,
+ "google.generativeai.protos.Chunk.__ror__": true,
+ "google.generativeai.protos.Chunk.copy_from": true,
+ "google.generativeai.protos.Chunk.create_time": true,
+ "google.generativeai.protos.Chunk.custom_metadata": true,
+ "google.generativeai.protos.Chunk.data": true,
+ "google.generativeai.protos.Chunk.deserialize": true,
+ "google.generativeai.protos.Chunk.from_json": true,
+ "google.generativeai.protos.Chunk.mro": true,
+ "google.generativeai.protos.Chunk.name": true,
+ "google.generativeai.protos.Chunk.pb": true,
+ "google.generativeai.protos.Chunk.serialize": true,
+ "google.generativeai.protos.Chunk.state": true,
+ "google.generativeai.protos.Chunk.to_dict": true,
+ "google.generativeai.protos.Chunk.to_json": true,
+ "google.generativeai.protos.Chunk.update_time": true,
+ "google.generativeai.protos.Chunk.wrap": true,
+ "google.generativeai.protos.ChunkData": false,
+ "google.generativeai.protos.ChunkData.__call__": true,
+ "google.generativeai.protos.ChunkData.__eq__": true,
+ "google.generativeai.protos.ChunkData.__ge__": true,
+ "google.generativeai.protos.ChunkData.__gt__": true,
+ "google.generativeai.protos.ChunkData.__init__": true,
+ "google.generativeai.protos.ChunkData.__le__": true,
+ "google.generativeai.protos.ChunkData.__lt__": true,
+ "google.generativeai.protos.ChunkData.__ne__": true,
+ "google.generativeai.protos.ChunkData.__new__": true,
+ "google.generativeai.protos.ChunkData.__or__": true,
+ "google.generativeai.protos.ChunkData.__ror__": true,
+ "google.generativeai.protos.ChunkData.copy_from": true,
+ "google.generativeai.protos.ChunkData.deserialize": true,
+ "google.generativeai.protos.ChunkData.from_json": true,
+ "google.generativeai.protos.ChunkData.mro": true,
+ "google.generativeai.protos.ChunkData.pb": true,
+ "google.generativeai.protos.ChunkData.serialize": true,
+ "google.generativeai.protos.ChunkData.string_value": true,
+ "google.generativeai.protos.ChunkData.to_dict": true,
+ "google.generativeai.protos.ChunkData.to_json": true,
+ "google.generativeai.protos.ChunkData.wrap": true,
+ "google.generativeai.protos.CitationMetadata": false,
+ "google.generativeai.protos.CitationMetadata.__call__": true,
+ "google.generativeai.protos.CitationMetadata.__eq__": true,
+ "google.generativeai.protos.CitationMetadata.__ge__": true,
+ "google.generativeai.protos.CitationMetadata.__gt__": true,
+ "google.generativeai.protos.CitationMetadata.__init__": true,
+ "google.generativeai.protos.CitationMetadata.__le__": true,
+ "google.generativeai.protos.CitationMetadata.__lt__": true,
+ "google.generativeai.protos.CitationMetadata.__ne__": true,
+ "google.generativeai.protos.CitationMetadata.__new__": true,
+ "google.generativeai.protos.CitationMetadata.__or__": true,
+ "google.generativeai.protos.CitationMetadata.__ror__": true,
+ "google.generativeai.protos.CitationMetadata.citation_sources": true,
+ "google.generativeai.protos.CitationMetadata.copy_from": true,
+ "google.generativeai.protos.CitationMetadata.deserialize": true,
+ "google.generativeai.protos.CitationMetadata.from_json": true,
+ "google.generativeai.protos.CitationMetadata.mro": true,
+ "google.generativeai.protos.CitationMetadata.pb": true,
+ "google.generativeai.protos.CitationMetadata.serialize": true,
+ "google.generativeai.protos.CitationMetadata.to_dict": true,
+ "google.generativeai.protos.CitationMetadata.to_json": true,
+ "google.generativeai.protos.CitationMetadata.wrap": true,
+ "google.generativeai.protos.CitationSource": false,
+ "google.generativeai.protos.CitationSource.__call__": true,
+ "google.generativeai.protos.CitationSource.__eq__": true,
+ "google.generativeai.protos.CitationSource.__ge__": true,
+ "google.generativeai.protos.CitationSource.__gt__": true,
+ "google.generativeai.protos.CitationSource.__init__": true,
+ "google.generativeai.protos.CitationSource.__le__": true,
+ "google.generativeai.protos.CitationSource.__lt__": true,
+ "google.generativeai.protos.CitationSource.__ne__": true,
+ "google.generativeai.protos.CitationSource.__new__": true,
+ "google.generativeai.protos.CitationSource.__or__": true,
+ "google.generativeai.protos.CitationSource.__ror__": true,
+ "google.generativeai.protos.CitationSource.copy_from": true,
+ "google.generativeai.protos.CitationSource.deserialize": true,
+ "google.generativeai.protos.CitationSource.end_index": true,
+ "google.generativeai.protos.CitationSource.from_json": true,
+ "google.generativeai.protos.CitationSource.license_": true,
+ "google.generativeai.protos.CitationSource.mro": true,
+ "google.generativeai.protos.CitationSource.pb": true,
+ "google.generativeai.protos.CitationSource.serialize": true,
+ "google.generativeai.protos.CitationSource.start_index": true,
+ "google.generativeai.protos.CitationSource.to_dict": true,
+ "google.generativeai.protos.CitationSource.to_json": true,
+ "google.generativeai.protos.CitationSource.uri": true,
+ "google.generativeai.protos.CitationSource.wrap": true,
+ "google.generativeai.protos.CodeExecution": false,
+ "google.generativeai.protos.CodeExecution.__call__": true,
+ "google.generativeai.protos.CodeExecution.__eq__": true,
+ "google.generativeai.protos.CodeExecution.__ge__": true,
+ "google.generativeai.protos.CodeExecution.__gt__": true,
+ "google.generativeai.protos.CodeExecution.__init__": true,
+ "google.generativeai.protos.CodeExecution.__le__": true,
+ "google.generativeai.protos.CodeExecution.__lt__": true,
+ "google.generativeai.protos.CodeExecution.__ne__": true,
+ "google.generativeai.protos.CodeExecution.__new__": true,
+ "google.generativeai.protos.CodeExecution.__or__": true,
+ "google.generativeai.protos.CodeExecution.__ror__": true,
+ "google.generativeai.protos.CodeExecution.copy_from": true,
+ "google.generativeai.protos.CodeExecution.deserialize": true,
+ "google.generativeai.protos.CodeExecution.from_json": true,
+ "google.generativeai.protos.CodeExecution.mro": true,
+ "google.generativeai.protos.CodeExecution.pb": true,
+ "google.generativeai.protos.CodeExecution.serialize": true,
+ "google.generativeai.protos.CodeExecution.to_dict": true,
+ "google.generativeai.protos.CodeExecution.to_json": true,
+ "google.generativeai.protos.CodeExecution.wrap": true,
+ "google.generativeai.protos.CodeExecutionResult": false,
+ "google.generativeai.protos.CodeExecutionResult.Outcome": false,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_DEADLINE_EXCEEDED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_FAILED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_OK": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_UNSPECIFIED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.from_bytes": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.imag": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.real": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": true,
+ "google.generativeai.protos.CodeExecutionResult.__call__": true,
+ "google.generativeai.protos.CodeExecutionResult.__eq__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ge__": true,
+ "google.generativeai.protos.CodeExecutionResult.__gt__": true,
+ "google.generativeai.protos.CodeExecutionResult.__init__": true,
+ "google.generativeai.protos.CodeExecutionResult.__le__": true,
+ "google.generativeai.protos.CodeExecutionResult.__lt__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ne__": true,
+ "google.generativeai.protos.CodeExecutionResult.__new__": true,
+ "google.generativeai.protos.CodeExecutionResult.__or__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ror__": true,
+ "google.generativeai.protos.CodeExecutionResult.copy_from": true,
+ "google.generativeai.protos.CodeExecutionResult.deserialize": true,
+ "google.generativeai.protos.CodeExecutionResult.from_json": true,
+ "google.generativeai.protos.CodeExecutionResult.mro": true,
+ "google.generativeai.protos.CodeExecutionResult.outcome": true,
+ "google.generativeai.protos.CodeExecutionResult.output": true,
+ "google.generativeai.protos.CodeExecutionResult.pb": true,
+ "google.generativeai.protos.CodeExecutionResult.serialize": true,
+ "google.generativeai.protos.CodeExecutionResult.to_dict": true,
+ "google.generativeai.protos.CodeExecutionResult.to_json": true,
+ "google.generativeai.protos.CodeExecutionResult.wrap": true,
+ "google.generativeai.protos.Condition": false,
+ "google.generativeai.protos.Condition.Operator": false,
+ "google.generativeai.protos.Condition.Operator.EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.EXCLUDES": true,
+ "google.generativeai.protos.Condition.Operator.GREATER": true,
+ "google.generativeai.protos.Condition.Operator.GREATER_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.INCLUDES": true,
+ "google.generativeai.protos.Condition.Operator.LESS": true,
+ "google.generativeai.protos.Condition.Operator.LESS_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.NOT_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.OPERATOR_UNSPECIFIED": true,
+ "google.generativeai.protos.Condition.Operator.__abs__": true,
+ "google.generativeai.protos.Condition.Operator.__add__": true,
+ "google.generativeai.protos.Condition.Operator.__and__": true,
+ "google.generativeai.protos.Condition.Operator.__bool__": true,
+ "google.generativeai.protos.Condition.Operator.__contains__": true,
+ "google.generativeai.protos.Condition.Operator.__eq__": true,
+ "google.generativeai.protos.Condition.Operator.__floordiv__": true,
+ "google.generativeai.protos.Condition.Operator.__ge__": true,
+ "google.generativeai.protos.Condition.Operator.__getitem__": true,
+ "google.generativeai.protos.Condition.Operator.__gt__": true,
+ "google.generativeai.protos.Condition.Operator.__init__": true,
+ "google.generativeai.protos.Condition.Operator.__invert__": true,
+ "google.generativeai.protos.Condition.Operator.__iter__": true,
+ "google.generativeai.protos.Condition.Operator.__le__": true,
+ "google.generativeai.protos.Condition.Operator.__len__": true,
+ "google.generativeai.protos.Condition.Operator.__lshift__": true,
+ "google.generativeai.protos.Condition.Operator.__lt__": true,
+ "google.generativeai.protos.Condition.Operator.__mod__": true,
+ "google.generativeai.protos.Condition.Operator.__mul__": true,
+ "google.generativeai.protos.Condition.Operator.__ne__": true,
+ "google.generativeai.protos.Condition.Operator.__neg__": true,
+ "google.generativeai.protos.Condition.Operator.__new__": true,
+ "google.generativeai.protos.Condition.Operator.__or__": true,
+ "google.generativeai.protos.Condition.Operator.__pos__": true,
+ "google.generativeai.protos.Condition.Operator.__pow__": true,
+ "google.generativeai.protos.Condition.Operator.__radd__": true,
+ "google.generativeai.protos.Condition.Operator.__rand__": true,
+ "google.generativeai.protos.Condition.Operator.__rfloordiv__": true,
+ "google.generativeai.protos.Condition.Operator.__rlshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rmod__": true,
+ "google.generativeai.protos.Condition.Operator.__rmul__": true,
+ "google.generativeai.protos.Condition.Operator.__ror__": true,
+ "google.generativeai.protos.Condition.Operator.__rpow__": true,
+ "google.generativeai.protos.Condition.Operator.__rrshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rsub__": true,
+ "google.generativeai.protos.Condition.Operator.__rtruediv__": true,
+ "google.generativeai.protos.Condition.Operator.__rxor__": true,
+ "google.generativeai.protos.Condition.Operator.__sub__": true,
+ "google.generativeai.protos.Condition.Operator.__truediv__": true,
+ "google.generativeai.protos.Condition.Operator.__xor__": true,
+ "google.generativeai.protos.Condition.Operator.as_integer_ratio": true,
+ "google.generativeai.protos.Condition.Operator.bit_count": true,
+ "google.generativeai.protos.Condition.Operator.bit_length": true,
+ "google.generativeai.protos.Condition.Operator.conjugate": true,
+ "google.generativeai.protos.Condition.Operator.denominator": true,
+ "google.generativeai.protos.Condition.Operator.from_bytes": true,
+ "google.generativeai.protos.Condition.Operator.imag": true,
+ "google.generativeai.protos.Condition.Operator.numerator": true,
+ "google.generativeai.protos.Condition.Operator.real": true,
+ "google.generativeai.protos.Condition.Operator.to_bytes": true,
+ "google.generativeai.protos.Condition.__call__": true,
+ "google.generativeai.protos.Condition.__eq__": true,
+ "google.generativeai.protos.Condition.__ge__": true,
+ "google.generativeai.protos.Condition.__gt__": true,
+ "google.generativeai.protos.Condition.__init__": true,
+ "google.generativeai.protos.Condition.__le__": true,
+ "google.generativeai.protos.Condition.__lt__": true,
+ "google.generativeai.protos.Condition.__ne__": true,
+ "google.generativeai.protos.Condition.__new__": true,
+ "google.generativeai.protos.Condition.__or__": true,
+ "google.generativeai.protos.Condition.__ror__": true,
+ "google.generativeai.protos.Condition.copy_from": true,
+ "google.generativeai.protos.Condition.deserialize": true,
+ "google.generativeai.protos.Condition.from_json": true,
+ "google.generativeai.protos.Condition.mro": true,
+ "google.generativeai.protos.Condition.numeric_value": true,
+ "google.generativeai.protos.Condition.operation": true,
+ "google.generativeai.protos.Condition.pb": true,
+ "google.generativeai.protos.Condition.serialize": true,
+ "google.generativeai.protos.Condition.string_value": true,
+ "google.generativeai.protos.Condition.to_dict": true,
+ "google.generativeai.protos.Condition.to_json": true,
+ "google.generativeai.protos.Condition.wrap": true,
+ "google.generativeai.protos.Content": false,
+ "google.generativeai.protos.Content.__call__": true,
+ "google.generativeai.protos.Content.__eq__": true,
+ "google.generativeai.protos.Content.__ge__": true,
+ "google.generativeai.protos.Content.__gt__": true,
+ "google.generativeai.protos.Content.__init__": true,
+ "google.generativeai.protos.Content.__le__": true,
+ "google.generativeai.protos.Content.__lt__": true,
+ "google.generativeai.protos.Content.__ne__": true,
+ "google.generativeai.protos.Content.__new__": true,
+ "google.generativeai.protos.Content.__or__": true,
+ "google.generativeai.protos.Content.__ror__": true,
+ "google.generativeai.protos.Content.copy_from": true,
+ "google.generativeai.protos.Content.deserialize": true,
+ "google.generativeai.protos.Content.from_json": true,
+ "google.generativeai.protos.Content.mro": true,
+ "google.generativeai.protos.Content.parts": true,
+ "google.generativeai.protos.Content.pb": true,
+ "google.generativeai.protos.Content.role": true,
+ "google.generativeai.protos.Content.serialize": true,
+ "google.generativeai.protos.Content.to_dict": true,
+ "google.generativeai.protos.Content.to_json": true,
+ "google.generativeai.protos.Content.wrap": true,
+ "google.generativeai.protos.ContentEmbedding": false,
+ "google.generativeai.protos.ContentEmbedding.__call__": true,
+ "google.generativeai.protos.ContentEmbedding.__eq__": true,
+ "google.generativeai.protos.ContentEmbedding.__ge__": true,
+ "google.generativeai.protos.ContentEmbedding.__gt__": true,
+ "google.generativeai.protos.ContentEmbedding.__init__": true,
+ "google.generativeai.protos.ContentEmbedding.__le__": true,
+ "google.generativeai.protos.ContentEmbedding.__lt__": true,
+ "google.generativeai.protos.ContentEmbedding.__ne__": true,
+ "google.generativeai.protos.ContentEmbedding.__new__": true,
+ "google.generativeai.protos.ContentEmbedding.__or__": true,
+ "google.generativeai.protos.ContentEmbedding.__ror__": true,
+ "google.generativeai.protos.ContentEmbedding.copy_from": true,
+ "google.generativeai.protos.ContentEmbedding.deserialize": true,
+ "google.generativeai.protos.ContentEmbedding.from_json": true,
+ "google.generativeai.protos.ContentEmbedding.mro": true,
+ "google.generativeai.protos.ContentEmbedding.pb": true,
+ "google.generativeai.protos.ContentEmbedding.serialize": true,
+ "google.generativeai.protos.ContentEmbedding.to_dict": true,
+ "google.generativeai.protos.ContentEmbedding.to_json": true,
+ "google.generativeai.protos.ContentEmbedding.values": true,
+ "google.generativeai.protos.ContentEmbedding.wrap": true,
+ "google.generativeai.protos.ContentFilter": false,
+ "google.generativeai.protos.ContentFilter.BlockedReason": false,
+ "google.generativeai.protos.ContentFilter.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.OTHER": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.SAFETY": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__add__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__and__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__contains__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__init__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__iter__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__le__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__len__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__new__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__or__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.denominator": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.imag": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.numerator": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.real": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": true,
+ "google.generativeai.protos.ContentFilter.__call__": true,
+ "google.generativeai.protos.ContentFilter.__eq__": true,
+ "google.generativeai.protos.ContentFilter.__ge__": true,
+ "google.generativeai.protos.ContentFilter.__gt__": true,
+ "google.generativeai.protos.ContentFilter.__init__": true,
+ "google.generativeai.protos.ContentFilter.__le__": true,
+ "google.generativeai.protos.ContentFilter.__lt__": true,
+ "google.generativeai.protos.ContentFilter.__ne__": true,
+ "google.generativeai.protos.ContentFilter.__new__": true,
+ "google.generativeai.protos.ContentFilter.__or__": true,
+ "google.generativeai.protos.ContentFilter.__ror__": true,
+ "google.generativeai.protos.ContentFilter.copy_from": true,
+ "google.generativeai.protos.ContentFilter.deserialize": true,
+ "google.generativeai.protos.ContentFilter.from_json": true,
+ "google.generativeai.protos.ContentFilter.message": true,
+ "google.generativeai.protos.ContentFilter.mro": true,
+ "google.generativeai.protos.ContentFilter.pb": true,
+ "google.generativeai.protos.ContentFilter.reason": true,
+ "google.generativeai.protos.ContentFilter.serialize": true,
+ "google.generativeai.protos.ContentFilter.to_dict": true,
+ "google.generativeai.protos.ContentFilter.to_json": true,
+ "google.generativeai.protos.ContentFilter.wrap": true,
+ "google.generativeai.protos.Corpus": false,
+ "google.generativeai.protos.Corpus.__call__": true,
+ "google.generativeai.protos.Corpus.__eq__": true,
+ "google.generativeai.protos.Corpus.__ge__": true,
+ "google.generativeai.protos.Corpus.__gt__": true,
+ "google.generativeai.protos.Corpus.__init__": true,
+ "google.generativeai.protos.Corpus.__le__": true,
+ "google.generativeai.protos.Corpus.__lt__": true,
+ "google.generativeai.protos.Corpus.__ne__": true,
+ "google.generativeai.protos.Corpus.__new__": true,
+ "google.generativeai.protos.Corpus.__or__": true,
+ "google.generativeai.protos.Corpus.__ror__": true,
+ "google.generativeai.protos.Corpus.copy_from": true,
+ "google.generativeai.protos.Corpus.create_time": true,
+ "google.generativeai.protos.Corpus.deserialize": true,
+ "google.generativeai.protos.Corpus.display_name": true,
+ "google.generativeai.protos.Corpus.from_json": true,
+ "google.generativeai.protos.Corpus.mro": true,
+ "google.generativeai.protos.Corpus.name": true,
+ "google.generativeai.protos.Corpus.pb": true,
+ "google.generativeai.protos.Corpus.serialize": true,
+ "google.generativeai.protos.Corpus.to_dict": true,
+ "google.generativeai.protos.Corpus.to_json": true,
+ "google.generativeai.protos.Corpus.update_time": true,
+ "google.generativeai.protos.Corpus.wrap": true,
+ "google.generativeai.protos.CountMessageTokensRequest": false,
+ "google.generativeai.protos.CountMessageTokensRequest.__call__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__init__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__le__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__new__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__or__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountMessageTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountMessageTokensRequest.from_json": true,
+ "google.generativeai.protos.CountMessageTokensRequest.model": true,
+ "google.generativeai.protos.CountMessageTokensRequest.mro": true,
+ "google.generativeai.protos.CountMessageTokensRequest.pb": true,
+ "google.generativeai.protos.CountMessageTokensRequest.prompt": true,
+ "google.generativeai.protos.CountMessageTokensRequest.serialize": true,
+ "google.generativeai.protos.CountMessageTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountMessageTokensRequest.to_json": true,
+ "google.generativeai.protos.CountMessageTokensRequest.wrap": true,
+ "google.generativeai.protos.CountMessageTokensResponse": false,
+ "google.generativeai.protos.CountMessageTokensResponse.__call__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__init__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__le__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__new__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__or__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountMessageTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountMessageTokensResponse.from_json": true,
+ "google.generativeai.protos.CountMessageTokensResponse.mro": true,
+ "google.generativeai.protos.CountMessageTokensResponse.pb": true,
+ "google.generativeai.protos.CountMessageTokensResponse.serialize": true,
+ "google.generativeai.protos.CountMessageTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountMessageTokensResponse.to_json": true,
+ "google.generativeai.protos.CountMessageTokensResponse.token_count": true,
+ "google.generativeai.protos.CountMessageTokensResponse.wrap": true,
+ "google.generativeai.protos.CountTextTokensRequest": false,
+ "google.generativeai.protos.CountTextTokensRequest.__call__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__init__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__le__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__new__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__or__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountTextTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountTextTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountTextTokensRequest.from_json": true,
+ "google.generativeai.protos.CountTextTokensRequest.model": true,
+ "google.generativeai.protos.CountTextTokensRequest.mro": true,
+ "google.generativeai.protos.CountTextTokensRequest.pb": true,
+ "google.generativeai.protos.CountTextTokensRequest.prompt": true,
+ "google.generativeai.protos.CountTextTokensRequest.serialize": true,
+ "google.generativeai.protos.CountTextTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountTextTokensRequest.to_json": true,
+ "google.generativeai.protos.CountTextTokensRequest.wrap": true,
+ "google.generativeai.protos.CountTextTokensResponse": false,
+ "google.generativeai.protos.CountTextTokensResponse.__call__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__init__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__le__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__new__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__or__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountTextTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountTextTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountTextTokensResponse.from_json": true,
+ "google.generativeai.protos.CountTextTokensResponse.mro": true,
+ "google.generativeai.protos.CountTextTokensResponse.pb": true,
+ "google.generativeai.protos.CountTextTokensResponse.serialize": true,
+ "google.generativeai.protos.CountTextTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountTextTokensResponse.to_json": true,
+ "google.generativeai.protos.CountTextTokensResponse.token_count": true,
+ "google.generativeai.protos.CountTextTokensResponse.wrap": true,
+ "google.generativeai.protos.CountTokensRequest": false,
+ "google.generativeai.protos.CountTokensRequest.__call__": true,
+ "google.generativeai.protos.CountTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountTokensRequest.__init__": true,
+ "google.generativeai.protos.CountTokensRequest.__le__": true,
+ "google.generativeai.protos.CountTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountTokensRequest.__new__": true,
+ "google.generativeai.protos.CountTokensRequest.__or__": true,
+ "google.generativeai.protos.CountTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountTokensRequest.contents": true,
+ "google.generativeai.protos.CountTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountTokensRequest.from_json": true,
+ "google.generativeai.protos.CountTokensRequest.generate_content_request": true,
+ "google.generativeai.protos.CountTokensRequest.model": true,
+ "google.generativeai.protos.CountTokensRequest.mro": true,
+ "google.generativeai.protos.CountTokensRequest.pb": true,
+ "google.generativeai.protos.CountTokensRequest.serialize": true,
+ "google.generativeai.protos.CountTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountTokensRequest.to_json": true,
+ "google.generativeai.protos.CountTokensRequest.wrap": true,
+ "google.generativeai.protos.CountTokensResponse": false,
+ "google.generativeai.protos.CountTokensResponse.__call__": true,
+ "google.generativeai.protos.CountTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountTokensResponse.__init__": true,
+ "google.generativeai.protos.CountTokensResponse.__le__": true,
+ "google.generativeai.protos.CountTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountTokensResponse.__new__": true,
+ "google.generativeai.protos.CountTokensResponse.__or__": true,
+ "google.generativeai.protos.CountTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountTokensResponse.cached_content_token_count": true,
+ "google.generativeai.protos.CountTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountTokensResponse.from_json": true,
+ "google.generativeai.protos.CountTokensResponse.mro": true,
+ "google.generativeai.protos.CountTokensResponse.pb": true,
+ "google.generativeai.protos.CountTokensResponse.serialize": true,
+ "google.generativeai.protos.CountTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountTokensResponse.to_json": true,
+ "google.generativeai.protos.CountTokensResponse.total_tokens": true,
+ "google.generativeai.protos.CountTokensResponse.wrap": true,
+ "google.generativeai.protos.CreateCachedContentRequest": false,
+ "google.generativeai.protos.CreateCachedContentRequest.__call__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__init__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__le__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__new__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__or__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.cached_content": true,
+ "google.generativeai.protos.CreateCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.CreateCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.CreateCachedContentRequest.from_json": true,
+ "google.generativeai.protos.CreateCachedContentRequest.mro": true,
+ "google.generativeai.protos.CreateCachedContentRequest.pb": true,
+ "google.generativeai.protos.CreateCachedContentRequest.serialize": true,
+ "google.generativeai.protos.CreateCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.CreateCachedContentRequest.to_json": true,
+ "google.generativeai.protos.CreateCachedContentRequest.wrap": true,
+ "google.generativeai.protos.CreateChunkRequest": false,
+ "google.generativeai.protos.CreateChunkRequest.__call__": true,
+ "google.generativeai.protos.CreateChunkRequest.__eq__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ge__": true,
+ "google.generativeai.protos.CreateChunkRequest.__gt__": true,
+ "google.generativeai.protos.CreateChunkRequest.__init__": true,
+ "google.generativeai.protos.CreateChunkRequest.__le__": true,
+ "google.generativeai.protos.CreateChunkRequest.__lt__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ne__": true,
+ "google.generativeai.protos.CreateChunkRequest.__new__": true,
+ "google.generativeai.protos.CreateChunkRequest.__or__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ror__": true,
+ "google.generativeai.protos.CreateChunkRequest.chunk": true,
+ "google.generativeai.protos.CreateChunkRequest.copy_from": true,
+ "google.generativeai.protos.CreateChunkRequest.deserialize": true,
+ "google.generativeai.protos.CreateChunkRequest.from_json": true,
+ "google.generativeai.protos.CreateChunkRequest.mro": true,
+ "google.generativeai.protos.CreateChunkRequest.parent": true,
+ "google.generativeai.protos.CreateChunkRequest.pb": true,
+ "google.generativeai.protos.CreateChunkRequest.serialize": true,
+ "google.generativeai.protos.CreateChunkRequest.to_dict": true,
+ "google.generativeai.protos.CreateChunkRequest.to_json": true,
+ "google.generativeai.protos.CreateChunkRequest.wrap": true,
+ "google.generativeai.protos.CreateCorpusRequest": false,
+ "google.generativeai.protos.CreateCorpusRequest.__call__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__eq__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ge__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__gt__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__init__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__le__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__lt__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ne__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__new__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__or__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ror__": true,
+ "google.generativeai.protos.CreateCorpusRequest.copy_from": true,
+ "google.generativeai.protos.CreateCorpusRequest.corpus": true,
+ "google.generativeai.protos.CreateCorpusRequest.deserialize": true,
+ "google.generativeai.protos.CreateCorpusRequest.from_json": true,
+ "google.generativeai.protos.CreateCorpusRequest.mro": true,
+ "google.generativeai.protos.CreateCorpusRequest.pb": true,
+ "google.generativeai.protos.CreateCorpusRequest.serialize": true,
+ "google.generativeai.protos.CreateCorpusRequest.to_dict": true,
+ "google.generativeai.protos.CreateCorpusRequest.to_json": true,
+ "google.generativeai.protos.CreateCorpusRequest.wrap": true,
+ "google.generativeai.protos.CreateDocumentRequest": false,
+ "google.generativeai.protos.CreateDocumentRequest.__call__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__eq__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ge__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__gt__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__init__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__le__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__lt__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ne__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__new__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__or__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ror__": true,
+ "google.generativeai.protos.CreateDocumentRequest.copy_from": true,
+ "google.generativeai.protos.CreateDocumentRequest.deserialize": true,
+ "google.generativeai.protos.CreateDocumentRequest.document": true,
+ "google.generativeai.protos.CreateDocumentRequest.from_json": true,
+ "google.generativeai.protos.CreateDocumentRequest.mro": true,
+ "google.generativeai.protos.CreateDocumentRequest.parent": true,
+ "google.generativeai.protos.CreateDocumentRequest.pb": true,
+ "google.generativeai.protos.CreateDocumentRequest.serialize": true,
+ "google.generativeai.protos.CreateDocumentRequest.to_dict": true,
+ "google.generativeai.protos.CreateDocumentRequest.to_json": true,
+ "google.generativeai.protos.CreateDocumentRequest.wrap": true,
+ "google.generativeai.protos.CreateFileRequest": false,
+ "google.generativeai.protos.CreateFileRequest.__call__": true,
+ "google.generativeai.protos.CreateFileRequest.__eq__": true,
+ "google.generativeai.protos.CreateFileRequest.__ge__": true,
+ "google.generativeai.protos.CreateFileRequest.__gt__": true,
+ "google.generativeai.protos.CreateFileRequest.__init__": true,
+ "google.generativeai.protos.CreateFileRequest.__le__": true,
+ "google.generativeai.protos.CreateFileRequest.__lt__": true,
+ "google.generativeai.protos.CreateFileRequest.__ne__": true,
+ "google.generativeai.protos.CreateFileRequest.__new__": true,
+ "google.generativeai.protos.CreateFileRequest.__or__": true,
+ "google.generativeai.protos.CreateFileRequest.__ror__": true,
+ "google.generativeai.protos.CreateFileRequest.copy_from": true,
+ "google.generativeai.protos.CreateFileRequest.deserialize": true,
+ "google.generativeai.protos.CreateFileRequest.file": true,
+ "google.generativeai.protos.CreateFileRequest.from_json": true,
+ "google.generativeai.protos.CreateFileRequest.mro": true,
+ "google.generativeai.protos.CreateFileRequest.pb": true,
+ "google.generativeai.protos.CreateFileRequest.serialize": true,
+ "google.generativeai.protos.CreateFileRequest.to_dict": true,
+ "google.generativeai.protos.CreateFileRequest.to_json": true,
+ "google.generativeai.protos.CreateFileRequest.wrap": true,
+ "google.generativeai.protos.CreateFileResponse": false,
+ "google.generativeai.protos.CreateFileResponse.__call__": true,
+ "google.generativeai.protos.CreateFileResponse.__eq__": true,
+ "google.generativeai.protos.CreateFileResponse.__ge__": true,
+ "google.generativeai.protos.CreateFileResponse.__gt__": true,
+ "google.generativeai.protos.CreateFileResponse.__init__": true,
+ "google.generativeai.protos.CreateFileResponse.__le__": true,
+ "google.generativeai.protos.CreateFileResponse.__lt__": true,
+ "google.generativeai.protos.CreateFileResponse.__ne__": true,
+ "google.generativeai.protos.CreateFileResponse.__new__": true,
+ "google.generativeai.protos.CreateFileResponse.__or__": true,
+ "google.generativeai.protos.CreateFileResponse.__ror__": true,
+ "google.generativeai.protos.CreateFileResponse.copy_from": true,
+ "google.generativeai.protos.CreateFileResponse.deserialize": true,
+ "google.generativeai.protos.CreateFileResponse.file": true,
+ "google.generativeai.protos.CreateFileResponse.from_json": true,
+ "google.generativeai.protos.CreateFileResponse.mro": true,
+ "google.generativeai.protos.CreateFileResponse.pb": true,
+ "google.generativeai.protos.CreateFileResponse.serialize": true,
+ "google.generativeai.protos.CreateFileResponse.to_dict": true,
+ "google.generativeai.protos.CreateFileResponse.to_json": true,
+ "google.generativeai.protos.CreateFileResponse.wrap": true,
+ "google.generativeai.protos.CreatePermissionRequest": false,
+ "google.generativeai.protos.CreatePermissionRequest.__call__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__eq__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ge__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__gt__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__init__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__le__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__lt__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ne__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__new__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__or__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ror__": true,
+ "google.generativeai.protos.CreatePermissionRequest.copy_from": true,
+ "google.generativeai.protos.CreatePermissionRequest.deserialize": true,
+ "google.generativeai.protos.CreatePermissionRequest.from_json": true,
+ "google.generativeai.protos.CreatePermissionRequest.mro": true,
+ "google.generativeai.protos.CreatePermissionRequest.parent": true,
+ "google.generativeai.protos.CreatePermissionRequest.pb": true,
+ "google.generativeai.protos.CreatePermissionRequest.permission": true,
+ "google.generativeai.protos.CreatePermissionRequest.serialize": true,
+ "google.generativeai.protos.CreatePermissionRequest.to_dict": true,
+ "google.generativeai.protos.CreatePermissionRequest.to_json": true,
+ "google.generativeai.protos.CreatePermissionRequest.wrap": true,
+ "google.generativeai.protos.CreateTunedModelMetadata": false,
+ "google.generativeai.protos.CreateTunedModelMetadata.__call__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__eq__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ge__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__gt__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__init__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__le__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__lt__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ne__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__new__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__or__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ror__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.completed_percent": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.completed_steps": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.copy_from": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.deserialize": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.from_json": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.mro": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.pb": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.serialize": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.snapshots": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.to_dict": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.to_json": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.total_steps": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.tuned_model": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.wrap": true,
+ "google.generativeai.protos.CreateTunedModelRequest": false,
+ "google.generativeai.protos.CreateTunedModelRequest.__call__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__init__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__le__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__new__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__or__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.CreateTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.CreateTunedModelRequest.from_json": true,
+ "google.generativeai.protos.CreateTunedModelRequest.mro": true,
+ "google.generativeai.protos.CreateTunedModelRequest.pb": true,
+ "google.generativeai.protos.CreateTunedModelRequest.serialize": true,
+ "google.generativeai.protos.CreateTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.CreateTunedModelRequest.to_json": true,
+ "google.generativeai.protos.CreateTunedModelRequest.tuned_model": true,
+ "google.generativeai.protos.CreateTunedModelRequest.tuned_model_id": true,
+ "google.generativeai.protos.CreateTunedModelRequest.wrap": true,
+ "google.generativeai.protos.CustomMetadata": false,
+ "google.generativeai.protos.CustomMetadata.__call__": true,
+ "google.generativeai.protos.CustomMetadata.__eq__": true,
+ "google.generativeai.protos.CustomMetadata.__ge__": true,
+ "google.generativeai.protos.CustomMetadata.__gt__": true,
+ "google.generativeai.protos.CustomMetadata.__init__": true,
+ "google.generativeai.protos.CustomMetadata.__le__": true,
+ "google.generativeai.protos.CustomMetadata.__lt__": true,
+ "google.generativeai.protos.CustomMetadata.__ne__": true,
+ "google.generativeai.protos.CustomMetadata.__new__": true,
+ "google.generativeai.protos.CustomMetadata.__or__": true,
+ "google.generativeai.protos.CustomMetadata.__ror__": true,
+ "google.generativeai.protos.CustomMetadata.copy_from": true,
+ "google.generativeai.protos.CustomMetadata.deserialize": true,
+ "google.generativeai.protos.CustomMetadata.from_json": true,
+ "google.generativeai.protos.CustomMetadata.key": true,
+ "google.generativeai.protos.CustomMetadata.mro": true,
+ "google.generativeai.protos.CustomMetadata.numeric_value": true,
+ "google.generativeai.protos.CustomMetadata.pb": true,
+ "google.generativeai.protos.CustomMetadata.serialize": true,
+ "google.generativeai.protos.CustomMetadata.string_list_value": true,
+ "google.generativeai.protos.CustomMetadata.string_value": true,
+ "google.generativeai.protos.CustomMetadata.to_dict": true,
+ "google.generativeai.protos.CustomMetadata.to_json": true,
+ "google.generativeai.protos.CustomMetadata.wrap": true,
+ "google.generativeai.protos.Dataset": false,
+ "google.generativeai.protos.Dataset.__call__": true,
+ "google.generativeai.protos.Dataset.__eq__": true,
+ "google.generativeai.protos.Dataset.__ge__": true,
+ "google.generativeai.protos.Dataset.__gt__": true,
+ "google.generativeai.protos.Dataset.__init__": true,
+ "google.generativeai.protos.Dataset.__le__": true,
+ "google.generativeai.protos.Dataset.__lt__": true,
+ "google.generativeai.protos.Dataset.__ne__": true,
+ "google.generativeai.protos.Dataset.__new__": true,
+ "google.generativeai.protos.Dataset.__or__": true,
+ "google.generativeai.protos.Dataset.__ror__": true,
+ "google.generativeai.protos.Dataset.copy_from": true,
+ "google.generativeai.protos.Dataset.deserialize": true,
+ "google.generativeai.protos.Dataset.examples": true,
+ "google.generativeai.protos.Dataset.from_json": true,
+ "google.generativeai.protos.Dataset.mro": true,
+ "google.generativeai.protos.Dataset.pb": true,
+ "google.generativeai.protos.Dataset.serialize": true,
+ "google.generativeai.protos.Dataset.to_dict": true,
+ "google.generativeai.protos.Dataset.to_json": true,
+ "google.generativeai.protos.Dataset.wrap": true,
+ "google.generativeai.protos.DeleteCachedContentRequest": false,
+ "google.generativeai.protos.DeleteCachedContentRequest.__call__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__init__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__le__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__new__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__or__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.from_json": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.mro": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.name": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.pb": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.serialize": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.to_json": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.wrap": true,
+ "google.generativeai.protos.DeleteChunkRequest": false,
+ "google.generativeai.protos.DeleteChunkRequest.__call__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__eq__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ge__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__gt__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__init__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__le__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__lt__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ne__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__new__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__or__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ror__": true,
+ "google.generativeai.protos.DeleteChunkRequest.copy_from": true,
+ "google.generativeai.protos.DeleteChunkRequest.deserialize": true,
+ "google.generativeai.protos.DeleteChunkRequest.from_json": true,
+ "google.generativeai.protos.DeleteChunkRequest.mro": true,
+ "google.generativeai.protos.DeleteChunkRequest.name": true,
+ "google.generativeai.protos.DeleteChunkRequest.pb": true,
+ "google.generativeai.protos.DeleteChunkRequest.serialize": true,
+ "google.generativeai.protos.DeleteChunkRequest.to_dict": true,
+ "google.generativeai.protos.DeleteChunkRequest.to_json": true,
+ "google.generativeai.protos.DeleteChunkRequest.wrap": true,
+ "google.generativeai.protos.DeleteCorpusRequest": false,
+ "google.generativeai.protos.DeleteCorpusRequest.__call__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__eq__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ge__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__gt__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__init__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__le__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__lt__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ne__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__new__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__or__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ror__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.copy_from": true,
+ "google.generativeai.protos.DeleteCorpusRequest.deserialize": true,
+ "google.generativeai.protos.DeleteCorpusRequest.force": true,
+ "google.generativeai.protos.DeleteCorpusRequest.from_json": true,
+ "google.generativeai.protos.DeleteCorpusRequest.mro": true,
+ "google.generativeai.protos.DeleteCorpusRequest.name": true,
+ "google.generativeai.protos.DeleteCorpusRequest.pb": true,
+ "google.generativeai.protos.DeleteCorpusRequest.serialize": true,
+ "google.generativeai.protos.DeleteCorpusRequest.to_dict": true,
+ "google.generativeai.protos.DeleteCorpusRequest.to_json": true,
+ "google.generativeai.protos.DeleteCorpusRequest.wrap": true,
+ "google.generativeai.protos.DeleteDocumentRequest": false,
+ "google.generativeai.protos.DeleteDocumentRequest.__call__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__eq__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ge__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__gt__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__init__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__le__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__lt__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ne__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__new__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__or__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ror__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.copy_from": true,
+ "google.generativeai.protos.DeleteDocumentRequest.deserialize": true,
+ "google.generativeai.protos.DeleteDocumentRequest.force": true,
+ "google.generativeai.protos.DeleteDocumentRequest.from_json": true,
+ "google.generativeai.protos.DeleteDocumentRequest.mro": true,
+ "google.generativeai.protos.DeleteDocumentRequest.name": true,
+ "google.generativeai.protos.DeleteDocumentRequest.pb": true,
+ "google.generativeai.protos.DeleteDocumentRequest.serialize": true,
+ "google.generativeai.protos.DeleteDocumentRequest.to_dict": true,
+ "google.generativeai.protos.DeleteDocumentRequest.to_json": true,
+ "google.generativeai.protos.DeleteDocumentRequest.wrap": true,
+ "google.generativeai.protos.DeleteFileRequest": false,
+ "google.generativeai.protos.DeleteFileRequest.__call__": true,
+ "google.generativeai.protos.DeleteFileRequest.__eq__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ge__": true,
+ "google.generativeai.protos.DeleteFileRequest.__gt__": true,
+ "google.generativeai.protos.DeleteFileRequest.__init__": true,
+ "google.generativeai.protos.DeleteFileRequest.__le__": true,
+ "google.generativeai.protos.DeleteFileRequest.__lt__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ne__": true,
+ "google.generativeai.protos.DeleteFileRequest.__new__": true,
+ "google.generativeai.protos.DeleteFileRequest.__or__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ror__": true,
+ "google.generativeai.protos.DeleteFileRequest.copy_from": true,
+ "google.generativeai.protos.DeleteFileRequest.deserialize": true,
+ "google.generativeai.protos.DeleteFileRequest.from_json": true,
+ "google.generativeai.protos.DeleteFileRequest.mro": true,
+ "google.generativeai.protos.DeleteFileRequest.name": true,
+ "google.generativeai.protos.DeleteFileRequest.pb": true,
+ "google.generativeai.protos.DeleteFileRequest.serialize": true,
+ "google.generativeai.protos.DeleteFileRequest.to_dict": true,
+ "google.generativeai.protos.DeleteFileRequest.to_json": true,
+ "google.generativeai.protos.DeleteFileRequest.wrap": true,
+ "google.generativeai.protos.DeletePermissionRequest": false,
+ "google.generativeai.protos.DeletePermissionRequest.__call__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__eq__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ge__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__gt__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__init__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__le__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__lt__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ne__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__new__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__or__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ror__": true,
+ "google.generativeai.protos.DeletePermissionRequest.copy_from": true,
+ "google.generativeai.protos.DeletePermissionRequest.deserialize": true,
+ "google.generativeai.protos.DeletePermissionRequest.from_json": true,
+ "google.generativeai.protos.DeletePermissionRequest.mro": true,
+ "google.generativeai.protos.DeletePermissionRequest.name": true,
+ "google.generativeai.protos.DeletePermissionRequest.pb": true,
+ "google.generativeai.protos.DeletePermissionRequest.serialize": true,
+ "google.generativeai.protos.DeletePermissionRequest.to_dict": true,
+ "google.generativeai.protos.DeletePermissionRequest.to_json": true,
+ "google.generativeai.protos.DeletePermissionRequest.wrap": true,
+ "google.generativeai.protos.DeleteTunedModelRequest": false,
+ "google.generativeai.protos.DeleteTunedModelRequest.__call__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__init__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__le__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__new__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__or__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.from_json": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.mro": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.name": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.pb": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.serialize": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.to_json": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.wrap": true,
+ "google.generativeai.protos.Document": false,
+ "google.generativeai.protos.Document.__call__": true,
+ "google.generativeai.protos.Document.__eq__": true,
+ "google.generativeai.protos.Document.__ge__": true,
+ "google.generativeai.protos.Document.__gt__": true,
+ "google.generativeai.protos.Document.__init__": true,
+ "google.generativeai.protos.Document.__le__": true,
+ "google.generativeai.protos.Document.__lt__": true,
+ "google.generativeai.protos.Document.__ne__": true,
+ "google.generativeai.protos.Document.__new__": true,
+ "google.generativeai.protos.Document.__or__": true,
+ "google.generativeai.protos.Document.__ror__": true,
+ "google.generativeai.protos.Document.copy_from": true,
+ "google.generativeai.protos.Document.create_time": true,
+ "google.generativeai.protos.Document.custom_metadata": true,
+ "google.generativeai.protos.Document.deserialize": true,
+ "google.generativeai.protos.Document.display_name": true,
+ "google.generativeai.protos.Document.from_json": true,
+ "google.generativeai.protos.Document.mro": true,
+ "google.generativeai.protos.Document.name": true,
+ "google.generativeai.protos.Document.pb": true,
+ "google.generativeai.protos.Document.serialize": true,
+ "google.generativeai.protos.Document.to_dict": true,
+ "google.generativeai.protos.Document.to_json": true,
+ "google.generativeai.protos.Document.update_time": true,
+ "google.generativeai.protos.Document.wrap": true,
+ "google.generativeai.protos.EmbedContentRequest": false,
+ "google.generativeai.protos.EmbedContentRequest.__call__": true,
+ "google.generativeai.protos.EmbedContentRequest.__eq__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ge__": true,
+ "google.generativeai.protos.EmbedContentRequest.__gt__": true,
+ "google.generativeai.protos.EmbedContentRequest.__init__": true,
+ "google.generativeai.protos.EmbedContentRequest.__le__": true,
+ "google.generativeai.protos.EmbedContentRequest.__lt__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ne__": true,
+ "google.generativeai.protos.EmbedContentRequest.__new__": true,
+ "google.generativeai.protos.EmbedContentRequest.__or__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ror__": true,
+ "google.generativeai.protos.EmbedContentRequest.content": true,
+ "google.generativeai.protos.EmbedContentRequest.copy_from": true,
+ "google.generativeai.protos.EmbedContentRequest.deserialize": true,
+ "google.generativeai.protos.EmbedContentRequest.from_json": true,
+ "google.generativeai.protos.EmbedContentRequest.model": true,
+ "google.generativeai.protos.EmbedContentRequest.mro": true,
+ "google.generativeai.protos.EmbedContentRequest.output_dimensionality": true,
+ "google.generativeai.protos.EmbedContentRequest.pb": true,
+ "google.generativeai.protos.EmbedContentRequest.serialize": true,
+ "google.generativeai.protos.EmbedContentRequest.task_type": true,
+ "google.generativeai.protos.EmbedContentRequest.title": true,
+ "google.generativeai.protos.EmbedContentRequest.to_dict": true,
+ "google.generativeai.protos.EmbedContentRequest.to_json": true,
+ "google.generativeai.protos.EmbedContentRequest.wrap": true,
+ "google.generativeai.protos.EmbedContentResponse": false,
+ "google.generativeai.protos.EmbedContentResponse.__call__": true,
+ "google.generativeai.protos.EmbedContentResponse.__eq__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ge__": true,
+ "google.generativeai.protos.EmbedContentResponse.__gt__": true,
+ "google.generativeai.protos.EmbedContentResponse.__init__": true,
+ "google.generativeai.protos.EmbedContentResponse.__le__": true,
+ "google.generativeai.protos.EmbedContentResponse.__lt__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ne__": true,
+ "google.generativeai.protos.EmbedContentResponse.__new__": true,
+ "google.generativeai.protos.EmbedContentResponse.__or__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ror__": true,
+ "google.generativeai.protos.EmbedContentResponse.copy_from": true,
+ "google.generativeai.protos.EmbedContentResponse.deserialize": true,
+ "google.generativeai.protos.EmbedContentResponse.embedding": true,
+ "google.generativeai.protos.EmbedContentResponse.from_json": true,
+ "google.generativeai.protos.EmbedContentResponse.mro": true,
+ "google.generativeai.protos.EmbedContentResponse.pb": true,
+ "google.generativeai.protos.EmbedContentResponse.serialize": true,
+ "google.generativeai.protos.EmbedContentResponse.to_dict": true,
+ "google.generativeai.protos.EmbedContentResponse.to_json": true,
+ "google.generativeai.protos.EmbedContentResponse.wrap": true,
+ "google.generativeai.protos.EmbedTextRequest": false,
+ "google.generativeai.protos.EmbedTextRequest.__call__": true,
+ "google.generativeai.protos.EmbedTextRequest.__eq__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ge__": true,
+ "google.generativeai.protos.EmbedTextRequest.__gt__": true,
+ "google.generativeai.protos.EmbedTextRequest.__init__": true,
+ "google.generativeai.protos.EmbedTextRequest.__le__": true,
+ "google.generativeai.protos.EmbedTextRequest.__lt__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ne__": true,
+ "google.generativeai.protos.EmbedTextRequest.__new__": true,
+ "google.generativeai.protos.EmbedTextRequest.__or__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ror__": true,
+ "google.generativeai.protos.EmbedTextRequest.copy_from": true,
+ "google.generativeai.protos.EmbedTextRequest.deserialize": true,
+ "google.generativeai.protos.EmbedTextRequest.from_json": true,
+ "google.generativeai.protos.EmbedTextRequest.model": true,
+ "google.generativeai.protos.EmbedTextRequest.mro": true,
+ "google.generativeai.protos.EmbedTextRequest.pb": true,
+ "google.generativeai.protos.EmbedTextRequest.serialize": true,
+ "google.generativeai.protos.EmbedTextRequest.text": true,
+ "google.generativeai.protos.EmbedTextRequest.to_dict": true,
+ "google.generativeai.protos.EmbedTextRequest.to_json": true,
+ "google.generativeai.protos.EmbedTextRequest.wrap": true,
+ "google.generativeai.protos.EmbedTextResponse": false,
+ "google.generativeai.protos.EmbedTextResponse.__call__": true,
+ "google.generativeai.protos.EmbedTextResponse.__eq__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ge__": true,
+ "google.generativeai.protos.EmbedTextResponse.__gt__": true,
+ "google.generativeai.protos.EmbedTextResponse.__init__": true,
+ "google.generativeai.protos.EmbedTextResponse.__le__": true,
+ "google.generativeai.protos.EmbedTextResponse.__lt__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ne__": true,
+ "google.generativeai.protos.EmbedTextResponse.__new__": true,
+ "google.generativeai.protos.EmbedTextResponse.__or__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ror__": true,
+ "google.generativeai.protos.EmbedTextResponse.copy_from": true,
+ "google.generativeai.protos.EmbedTextResponse.deserialize": true,
+ "google.generativeai.protos.EmbedTextResponse.embedding": true,
+ "google.generativeai.protos.EmbedTextResponse.from_json": true,
+ "google.generativeai.protos.EmbedTextResponse.mro": true,
+ "google.generativeai.protos.EmbedTextResponse.pb": true,
+ "google.generativeai.protos.EmbedTextResponse.serialize": true,
+ "google.generativeai.protos.EmbedTextResponse.to_dict": true,
+ "google.generativeai.protos.EmbedTextResponse.to_json": true,
+ "google.generativeai.protos.EmbedTextResponse.wrap": true,
+ "google.generativeai.protos.Embedding": false,
+ "google.generativeai.protos.Embedding.__call__": true,
+ "google.generativeai.protos.Embedding.__eq__": true,
+ "google.generativeai.protos.Embedding.__ge__": true,
+ "google.generativeai.protos.Embedding.__gt__": true,
+ "google.generativeai.protos.Embedding.__init__": true,
+ "google.generativeai.protos.Embedding.__le__": true,
+ "google.generativeai.protos.Embedding.__lt__": true,
+ "google.generativeai.protos.Embedding.__ne__": true,
+ "google.generativeai.protos.Embedding.__new__": true,
+ "google.generativeai.protos.Embedding.__or__": true,
+ "google.generativeai.protos.Embedding.__ror__": true,
+ "google.generativeai.protos.Embedding.copy_from": true,
+ "google.generativeai.protos.Embedding.deserialize": true,
+ "google.generativeai.protos.Embedding.from_json": true,
+ "google.generativeai.protos.Embedding.mro": true,
+ "google.generativeai.protos.Embedding.pb": true,
+ "google.generativeai.protos.Embedding.serialize": true,
+ "google.generativeai.protos.Embedding.to_dict": true,
+ "google.generativeai.protos.Embedding.to_json": true,
+ "google.generativeai.protos.Embedding.value": true,
+ "google.generativeai.protos.Embedding.wrap": true,
+ "google.generativeai.protos.Example": false,
+ "google.generativeai.protos.Example.__call__": true,
+ "google.generativeai.protos.Example.__eq__": true,
+ "google.generativeai.protos.Example.__ge__": true,
+ "google.generativeai.protos.Example.__gt__": true,
+ "google.generativeai.protos.Example.__init__": true,
+ "google.generativeai.protos.Example.__le__": true,
+ "google.generativeai.protos.Example.__lt__": true,
+ "google.generativeai.protos.Example.__ne__": true,
+ "google.generativeai.protos.Example.__new__": true,
+ "google.generativeai.protos.Example.__or__": true,
+ "google.generativeai.protos.Example.__ror__": true,
+ "google.generativeai.protos.Example.copy_from": true,
+ "google.generativeai.protos.Example.deserialize": true,
+ "google.generativeai.protos.Example.from_json": true,
+ "google.generativeai.protos.Example.input": true,
+ "google.generativeai.protos.Example.mro": true,
+ "google.generativeai.protos.Example.output": true,
+ "google.generativeai.protos.Example.pb": true,
+ "google.generativeai.protos.Example.serialize": true,
+ "google.generativeai.protos.Example.to_dict": true,
+ "google.generativeai.protos.Example.to_json": true,
+ "google.generativeai.protos.Example.wrap": true,
+ "google.generativeai.protos.ExecutableCode": false,
+ "google.generativeai.protos.ExecutableCode.Language": false,
+ "google.generativeai.protos.ExecutableCode.Language.LANGUAGE_UNSPECIFIED": true,
+ "google.generativeai.protos.ExecutableCode.Language.PYTHON": true,
+ "google.generativeai.protos.ExecutableCode.Language.__abs__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__add__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__and__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__bool__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__contains__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__eq__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__floordiv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ge__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__getitem__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__gt__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__init__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__invert__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__iter__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__le__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__len__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__lshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__lt__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__mod__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__mul__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ne__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__neg__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__new__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__or__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__pos__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__pow__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__radd__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rand__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rlshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rmod__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rmul__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ror__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rpow__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rrshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rsub__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rxor__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__sub__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__truediv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__xor__": true,
+ "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": true,
+ "google.generativeai.protos.ExecutableCode.Language.bit_count": true,
+ "google.generativeai.protos.ExecutableCode.Language.bit_length": true,
+ "google.generativeai.protos.ExecutableCode.Language.conjugate": true,
+ "google.generativeai.protos.ExecutableCode.Language.denominator": true,
+ "google.generativeai.protos.ExecutableCode.Language.from_bytes": true,
+ "google.generativeai.protos.ExecutableCode.Language.imag": true,
+ "google.generativeai.protos.ExecutableCode.Language.numerator": true,
+ "google.generativeai.protos.ExecutableCode.Language.real": true,
+ "google.generativeai.protos.ExecutableCode.Language.to_bytes": true,
+ "google.generativeai.protos.ExecutableCode.__call__": true,
+ "google.generativeai.protos.ExecutableCode.__eq__": true,
+ "google.generativeai.protos.ExecutableCode.__ge__": true,
+ "google.generativeai.protos.ExecutableCode.__gt__": true,
+ "google.generativeai.protos.ExecutableCode.__init__": true,
+ "google.generativeai.protos.ExecutableCode.__le__": true,
+ "google.generativeai.protos.ExecutableCode.__lt__": true,
+ "google.generativeai.protos.ExecutableCode.__ne__": true,
+ "google.generativeai.protos.ExecutableCode.__new__": true,
+ "google.generativeai.protos.ExecutableCode.__or__": true,
+ "google.generativeai.protos.ExecutableCode.__ror__": true,
+ "google.generativeai.protos.ExecutableCode.code": true,
+ "google.generativeai.protos.ExecutableCode.copy_from": true,
+ "google.generativeai.protos.ExecutableCode.deserialize": true,
+ "google.generativeai.protos.ExecutableCode.from_json": true,
+ "google.generativeai.protos.ExecutableCode.language": true,
+ "google.generativeai.protos.ExecutableCode.mro": true,
+ "google.generativeai.protos.ExecutableCode.pb": true,
+ "google.generativeai.protos.ExecutableCode.serialize": true,
+ "google.generativeai.protos.ExecutableCode.to_dict": true,
+ "google.generativeai.protos.ExecutableCode.to_json": true,
+ "google.generativeai.protos.ExecutableCode.wrap": true,
+ "google.generativeai.protos.File": false,
+ "google.generativeai.protos.File.State": false,
+ "google.generativeai.protos.File.State.ACTIVE": true,
+ "google.generativeai.protos.File.State.FAILED": true,
+ "google.generativeai.protos.File.State.PROCESSING": true,
+ "google.generativeai.protos.File.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.File.State.__abs__": true,
+ "google.generativeai.protos.File.State.__add__": true,
+ "google.generativeai.protos.File.State.__and__": true,
+ "google.generativeai.protos.File.State.__bool__": true,
+ "google.generativeai.protos.File.State.__contains__": true,
+ "google.generativeai.protos.File.State.__eq__": true,
+ "google.generativeai.protos.File.State.__floordiv__": true,
+ "google.generativeai.protos.File.State.__ge__": true,
+ "google.generativeai.protos.File.State.__getitem__": true,
+ "google.generativeai.protos.File.State.__gt__": true,
+ "google.generativeai.protos.File.State.__init__": true,
+ "google.generativeai.protos.File.State.__invert__": true,
+ "google.generativeai.protos.File.State.__iter__": true,
+ "google.generativeai.protos.File.State.__le__": true,
+ "google.generativeai.protos.File.State.__len__": true,
+ "google.generativeai.protos.File.State.__lshift__": true,
+ "google.generativeai.protos.File.State.__lt__": true,
+ "google.generativeai.protos.File.State.__mod__": true,
+ "google.generativeai.protos.File.State.__mul__": true,
+ "google.generativeai.protos.File.State.__ne__": true,
+ "google.generativeai.protos.File.State.__neg__": true,
+ "google.generativeai.protos.File.State.__new__": true,
+ "google.generativeai.protos.File.State.__or__": true,
+ "google.generativeai.protos.File.State.__pos__": true,
+ "google.generativeai.protos.File.State.__pow__": true,
+ "google.generativeai.protos.File.State.__radd__": true,
+ "google.generativeai.protos.File.State.__rand__": true,
+ "google.generativeai.protos.File.State.__rfloordiv__": true,
+ "google.generativeai.protos.File.State.__rlshift__": true,
+ "google.generativeai.protos.File.State.__rmod__": true,
+ "google.generativeai.protos.File.State.__rmul__": true,
+ "google.generativeai.protos.File.State.__ror__": true,
+ "google.generativeai.protos.File.State.__rpow__": true,
+ "google.generativeai.protos.File.State.__rrshift__": true,
+ "google.generativeai.protos.File.State.__rshift__": true,
+ "google.generativeai.protos.File.State.__rsub__": true,
+ "google.generativeai.protos.File.State.__rtruediv__": true,
+ "google.generativeai.protos.File.State.__rxor__": true,
+ "google.generativeai.protos.File.State.__sub__": true,
+ "google.generativeai.protos.File.State.__truediv__": true,
+ "google.generativeai.protos.File.State.__xor__": true,
+ "google.generativeai.protos.File.State.as_integer_ratio": true,
+ "google.generativeai.protos.File.State.bit_count": true,
+ "google.generativeai.protos.File.State.bit_length": true,
+ "google.generativeai.protos.File.State.conjugate": true,
+ "google.generativeai.protos.File.State.denominator": true,
+ "google.generativeai.protos.File.State.from_bytes": true,
+ "google.generativeai.protos.File.State.imag": true,
+ "google.generativeai.protos.File.State.numerator": true,
+ "google.generativeai.protos.File.State.real": true,
+ "google.generativeai.protos.File.State.to_bytes": true,
+ "google.generativeai.protos.File.__call__": true,
+ "google.generativeai.protos.File.__eq__": true,
+ "google.generativeai.protos.File.__ge__": true,
+ "google.generativeai.protos.File.__gt__": true,
+ "google.generativeai.protos.File.__init__": true,
+ "google.generativeai.protos.File.__le__": true,
+ "google.generativeai.protos.File.__lt__": true,
+ "google.generativeai.protos.File.__ne__": true,
+ "google.generativeai.protos.File.__new__": true,
+ "google.generativeai.protos.File.__or__": true,
+ "google.generativeai.protos.File.__ror__": true,
+ "google.generativeai.protos.File.copy_from": true,
+ "google.generativeai.protos.File.create_time": true,
+ "google.generativeai.protos.File.deserialize": true,
+ "google.generativeai.protos.File.display_name": true,
+ "google.generativeai.protos.File.error": true,
+ "google.generativeai.protos.File.expiration_time": true,
+ "google.generativeai.protos.File.from_json": true,
+ "google.generativeai.protos.File.mime_type": true,
+ "google.generativeai.protos.File.mro": true,
+ "google.generativeai.protos.File.name": true,
+ "google.generativeai.protos.File.pb": true,
+ "google.generativeai.protos.File.serialize": true,
+ "google.generativeai.protos.File.sha256_hash": true,
+ "google.generativeai.protos.File.size_bytes": true,
+ "google.generativeai.protos.File.state": true,
+ "google.generativeai.protos.File.to_dict": true,
+ "google.generativeai.protos.File.to_json": true,
+ "google.generativeai.protos.File.update_time": true,
+ "google.generativeai.protos.File.uri": true,
+ "google.generativeai.protos.File.video_metadata": true,
+ "google.generativeai.protos.File.wrap": true,
+ "google.generativeai.protos.FileData": false,
+ "google.generativeai.protos.FileData.__call__": true,
+ "google.generativeai.protos.FileData.__eq__": true,
+ "google.generativeai.protos.FileData.__ge__": true,
+ "google.generativeai.protos.FileData.__gt__": true,
+ "google.generativeai.protos.FileData.__init__": true,
+ "google.generativeai.protos.FileData.__le__": true,
+ "google.generativeai.protos.FileData.__lt__": true,
+ "google.generativeai.protos.FileData.__ne__": true,
+ "google.generativeai.protos.FileData.__new__": true,
+ "google.generativeai.protos.FileData.__or__": true,
+ "google.generativeai.protos.FileData.__ror__": true,
+ "google.generativeai.protos.FileData.copy_from": true,
+ "google.generativeai.protos.FileData.deserialize": true,
+ "google.generativeai.protos.FileData.file_uri": true,
+ "google.generativeai.protos.FileData.from_json": true,
+ "google.generativeai.protos.FileData.mime_type": true,
+ "google.generativeai.protos.FileData.mro": true,
+ "google.generativeai.protos.FileData.pb": true,
+ "google.generativeai.protos.FileData.serialize": true,
+ "google.generativeai.protos.FileData.to_dict": true,
+ "google.generativeai.protos.FileData.to_json": true,
+ "google.generativeai.protos.FileData.wrap": true,
+ "google.generativeai.protos.FunctionCall": false,
+ "google.generativeai.protos.FunctionCall.__call__": true,
+ "google.generativeai.protos.FunctionCall.__eq__": true,
+ "google.generativeai.protos.FunctionCall.__ge__": true,
+ "google.generativeai.protos.FunctionCall.__gt__": true,
+ "google.generativeai.protos.FunctionCall.__init__": true,
+ "google.generativeai.protos.FunctionCall.__le__": true,
+ "google.generativeai.protos.FunctionCall.__lt__": true,
+ "google.generativeai.protos.FunctionCall.__ne__": true,
+ "google.generativeai.protos.FunctionCall.__new__": true,
+ "google.generativeai.protos.FunctionCall.__or__": true,
+ "google.generativeai.protos.FunctionCall.__ror__": true,
+ "google.generativeai.protos.FunctionCall.args": true,
+ "google.generativeai.protos.FunctionCall.copy_from": true,
+ "google.generativeai.protos.FunctionCall.deserialize": true,
+ "google.generativeai.protos.FunctionCall.from_json": true,
+ "google.generativeai.protos.FunctionCall.mro": true,
+ "google.generativeai.protos.FunctionCall.name": true,
+ "google.generativeai.protos.FunctionCall.pb": true,
+ "google.generativeai.protos.FunctionCall.serialize": true,
+ "google.generativeai.protos.FunctionCall.to_dict": true,
+ "google.generativeai.protos.FunctionCall.to_json": true,
+ "google.generativeai.protos.FunctionCall.wrap": true,
+ "google.generativeai.protos.FunctionCallingConfig": false,
+ "google.generativeai.protos.FunctionCallingConfig.Mode": false,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.ANY": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.AUTO": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.MODE_UNSPECIFIED": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.NONE": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.from_bytes": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.imag": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.real": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": true,
+ "google.generativeai.protos.FunctionCallingConfig.__call__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__eq__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ge__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__gt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__init__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__le__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__lt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ne__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__new__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__or__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ror__": true,
+ "google.generativeai.protos.FunctionCallingConfig.allowed_function_names": true,
+ "google.generativeai.protos.FunctionCallingConfig.copy_from": true,
+ "google.generativeai.protos.FunctionCallingConfig.deserialize": true,
+ "google.generativeai.protos.FunctionCallingConfig.from_json": true,
+ "google.generativeai.protos.FunctionCallingConfig.mode": true,
+ "google.generativeai.protos.FunctionCallingConfig.mro": true,
+ "google.generativeai.protos.FunctionCallingConfig.pb": true,
+ "google.generativeai.protos.FunctionCallingConfig.serialize": true,
+ "google.generativeai.protos.FunctionCallingConfig.to_dict": true,
+ "google.generativeai.protos.FunctionCallingConfig.to_json": true,
+ "google.generativeai.protos.FunctionCallingConfig.wrap": true,
+ "google.generativeai.protos.FunctionDeclaration": false,
+ "google.generativeai.protos.FunctionDeclaration.__call__": true,
+ "google.generativeai.protos.FunctionDeclaration.__eq__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ge__": true,
+ "google.generativeai.protos.FunctionDeclaration.__gt__": true,
+ "google.generativeai.protos.FunctionDeclaration.__init__": true,
+ "google.generativeai.protos.FunctionDeclaration.__le__": true,
+ "google.generativeai.protos.FunctionDeclaration.__lt__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ne__": true,
+ "google.generativeai.protos.FunctionDeclaration.__new__": true,
+ "google.generativeai.protos.FunctionDeclaration.__or__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ror__": true,
+ "google.generativeai.protos.FunctionDeclaration.copy_from": true,
+ "google.generativeai.protos.FunctionDeclaration.description": true,
+ "google.generativeai.protos.FunctionDeclaration.deserialize": true,
+ "google.generativeai.protos.FunctionDeclaration.from_json": true,
+ "google.generativeai.protos.FunctionDeclaration.mro": true,
+ "google.generativeai.protos.FunctionDeclaration.name": true,
+ "google.generativeai.protos.FunctionDeclaration.parameters": true,
+ "google.generativeai.protos.FunctionDeclaration.pb": true,
+ "google.generativeai.protos.FunctionDeclaration.serialize": true,
+ "google.generativeai.protos.FunctionDeclaration.to_dict": true,
+ "google.generativeai.protos.FunctionDeclaration.to_json": true,
+ "google.generativeai.protos.FunctionDeclaration.wrap": true,
+ "google.generativeai.protos.FunctionResponse": false,
+ "google.generativeai.protos.FunctionResponse.__call__": true,
+ "google.generativeai.protos.FunctionResponse.__eq__": true,
+ "google.generativeai.protos.FunctionResponse.__ge__": true,
+ "google.generativeai.protos.FunctionResponse.__gt__": true,
+ "google.generativeai.protos.FunctionResponse.__init__": true,
+ "google.generativeai.protos.FunctionResponse.__le__": true,
+ "google.generativeai.protos.FunctionResponse.__lt__": true,
+ "google.generativeai.protos.FunctionResponse.__ne__": true,
+ "google.generativeai.protos.FunctionResponse.__new__": true,
+ "google.generativeai.protos.FunctionResponse.__or__": true,
+ "google.generativeai.protos.FunctionResponse.__ror__": true,
+ "google.generativeai.protos.FunctionResponse.copy_from": true,
+ "google.generativeai.protos.FunctionResponse.deserialize": true,
+ "google.generativeai.protos.FunctionResponse.from_json": true,
+ "google.generativeai.protos.FunctionResponse.mro": true,
+ "google.generativeai.protos.FunctionResponse.name": true,
+ "google.generativeai.protos.FunctionResponse.pb": true,
+ "google.generativeai.protos.FunctionResponse.response": true,
+ "google.generativeai.protos.FunctionResponse.serialize": true,
+ "google.generativeai.protos.FunctionResponse.to_dict": true,
+ "google.generativeai.protos.FunctionResponse.to_json": true,
+ "google.generativeai.protos.FunctionResponse.wrap": true,
+ "google.generativeai.protos.GenerateAnswerRequest": false,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": false,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ANSWER_STYLE_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.VERBOSE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.from_bytes": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__call__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__init__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__le__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__new__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__or__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.answer_style": true,
+ "google.generativeai.protos.GenerateAnswerRequest.contents": true,
+ "google.generativeai.protos.GenerateAnswerRequest.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerRequest.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerRequest.from_json": true,
+ "google.generativeai.protos.GenerateAnswerRequest.inline_passages": true,
+ "google.generativeai.protos.GenerateAnswerRequest.model": true,
+ "google.generativeai.protos.GenerateAnswerRequest.mro": true,
+ "google.generativeai.protos.GenerateAnswerRequest.pb": true,
+ "google.generativeai.protos.GenerateAnswerRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateAnswerRequest.semantic_retriever": true,
+ "google.generativeai.protos.GenerateAnswerRequest.serialize": true,
+ "google.generativeai.protos.GenerateAnswerRequest.temperature": true,
+ "google.generativeai.protos.GenerateAnswerRequest.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerRequest.to_json": true,
+ "google.generativeai.protos.GenerateAnswerRequest.wrap": true,
+ "google.generativeai.protos.GenerateAnswerResponse": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.OTHER": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.SAFETY": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.from_bytes": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__call__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.block_reason": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.mro": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.safety_ratings": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__call__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.answer": true,
+ "google.generativeai.protos.GenerateAnswerResponse.answerable_probability": true,
+ "google.generativeai.protos.GenerateAnswerResponse.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerResponse.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.from_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.input_feedback": true,
+ "google.generativeai.protos.GenerateAnswerResponse.mro": true,
+ "google.generativeai.protos.GenerateAnswerResponse.pb": true,
+ "google.generativeai.protos.GenerateAnswerResponse.serialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerResponse.to_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.wrap": true,
+ "google.generativeai.protos.GenerateContentRequest": false,
+ "google.generativeai.protos.GenerateContentRequest.__call__": true,
+ "google.generativeai.protos.GenerateContentRequest.__eq__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ge__": true,
+ "google.generativeai.protos.GenerateContentRequest.__gt__": true,
+ "google.generativeai.protos.GenerateContentRequest.__init__": true,
+ "google.generativeai.protos.GenerateContentRequest.__le__": true,
+ "google.generativeai.protos.GenerateContentRequest.__lt__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ne__": true,
+ "google.generativeai.protos.GenerateContentRequest.__new__": true,
+ "google.generativeai.protos.GenerateContentRequest.__or__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ror__": true,
+ "google.generativeai.protos.GenerateContentRequest.cached_content": true,
+ "google.generativeai.protos.GenerateContentRequest.contents": true,
+ "google.generativeai.protos.GenerateContentRequest.copy_from": true,
+ "google.generativeai.protos.GenerateContentRequest.deserialize": true,
+ "google.generativeai.protos.GenerateContentRequest.from_json": true,
+ "google.generativeai.protos.GenerateContentRequest.generation_config": true,
+ "google.generativeai.protos.GenerateContentRequest.model": true,
+ "google.generativeai.protos.GenerateContentRequest.mro": true,
+ "google.generativeai.protos.GenerateContentRequest.pb": true,
+ "google.generativeai.protos.GenerateContentRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateContentRequest.serialize": true,
+ "google.generativeai.protos.GenerateContentRequest.system_instruction": true,
+ "google.generativeai.protos.GenerateContentRequest.to_dict": true,
+ "google.generativeai.protos.GenerateContentRequest.to_json": true,
+ "google.generativeai.protos.GenerateContentRequest.tool_config": true,
+ "google.generativeai.protos.GenerateContentRequest.tools": true,
+ "google.generativeai.protos.GenerateContentRequest.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.OTHER": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.SAFETY": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.from_bytes": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.block_reason": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.safety_ratings": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata": false,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.cached_content_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.candidates_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.prompt_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.total_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.candidates": true,
+ "google.generativeai.protos.GenerateContentResponse.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.protos.GenerateContentResponse.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.usage_metadata": true,
+ "google.generativeai.protos.GenerateContentResponse.wrap": true,
+ "google.generativeai.protos.GenerateMessageRequest": false,
+ "google.generativeai.protos.GenerateMessageRequest.__call__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__eq__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ge__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__gt__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__init__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__le__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__lt__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ne__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__new__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__or__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ror__": true,
+ "google.generativeai.protos.GenerateMessageRequest.candidate_count": true,
+ "google.generativeai.protos.GenerateMessageRequest.copy_from": true,
+ "google.generativeai.protos.GenerateMessageRequest.deserialize": true,
+ "google.generativeai.protos.GenerateMessageRequest.from_json": true,
+ "google.generativeai.protos.GenerateMessageRequest.model": true,
+ "google.generativeai.protos.GenerateMessageRequest.mro": true,
+ "google.generativeai.protos.GenerateMessageRequest.pb": true,
+ "google.generativeai.protos.GenerateMessageRequest.prompt": true,
+ "google.generativeai.protos.GenerateMessageRequest.serialize": true,
+ "google.generativeai.protos.GenerateMessageRequest.temperature": true,
+ "google.generativeai.protos.GenerateMessageRequest.to_dict": true,
+ "google.generativeai.protos.GenerateMessageRequest.to_json": true,
+ "google.generativeai.protos.GenerateMessageRequest.top_k": true,
+ "google.generativeai.protos.GenerateMessageRequest.top_p": true,
+ "google.generativeai.protos.GenerateMessageRequest.wrap": true,
+ "google.generativeai.protos.GenerateMessageResponse": false,
+ "google.generativeai.protos.GenerateMessageResponse.__call__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__eq__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ge__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__gt__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__init__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__le__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__lt__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ne__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__new__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__or__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ror__": true,
+ "google.generativeai.protos.GenerateMessageResponse.candidates": true,
+ "google.generativeai.protos.GenerateMessageResponse.copy_from": true,
+ "google.generativeai.protos.GenerateMessageResponse.deserialize": true,
+ "google.generativeai.protos.GenerateMessageResponse.filters": true,
+ "google.generativeai.protos.GenerateMessageResponse.from_json": true,
+ "google.generativeai.protos.GenerateMessageResponse.messages": true,
+ "google.generativeai.protos.GenerateMessageResponse.mro": true,
+ "google.generativeai.protos.GenerateMessageResponse.pb": true,
+ "google.generativeai.protos.GenerateMessageResponse.serialize": true,
+ "google.generativeai.protos.GenerateMessageResponse.to_dict": true,
+ "google.generativeai.protos.GenerateMessageResponse.to_json": true,
+ "google.generativeai.protos.GenerateMessageResponse.wrap": true,
+ "google.generativeai.protos.GenerateTextRequest": false,
+ "google.generativeai.protos.GenerateTextRequest.__call__": true,
+ "google.generativeai.protos.GenerateTextRequest.__eq__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ge__": true,
+ "google.generativeai.protos.GenerateTextRequest.__gt__": true,
+ "google.generativeai.protos.GenerateTextRequest.__init__": true,
+ "google.generativeai.protos.GenerateTextRequest.__le__": true,
+ "google.generativeai.protos.GenerateTextRequest.__lt__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ne__": true,
+ "google.generativeai.protos.GenerateTextRequest.__new__": true,
+ "google.generativeai.protos.GenerateTextRequest.__or__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ror__": true,
+ "google.generativeai.protos.GenerateTextRequest.candidate_count": true,
+ "google.generativeai.protos.GenerateTextRequest.copy_from": true,
+ "google.generativeai.protos.GenerateTextRequest.deserialize": true,
+ "google.generativeai.protos.GenerateTextRequest.from_json": true,
+ "google.generativeai.protos.GenerateTextRequest.max_output_tokens": true,
+ "google.generativeai.protos.GenerateTextRequest.model": true,
+ "google.generativeai.protos.GenerateTextRequest.mro": true,
+ "google.generativeai.protos.GenerateTextRequest.pb": true,
+ "google.generativeai.protos.GenerateTextRequest.prompt": true,
+ "google.generativeai.protos.GenerateTextRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateTextRequest.serialize": true,
+ "google.generativeai.protos.GenerateTextRequest.stop_sequences": true,
+ "google.generativeai.protos.GenerateTextRequest.temperature": true,
+ "google.generativeai.protos.GenerateTextRequest.to_dict": true,
+ "google.generativeai.protos.GenerateTextRequest.to_json": true,
+ "google.generativeai.protos.GenerateTextRequest.top_k": true,
+ "google.generativeai.protos.GenerateTextRequest.top_p": true,
+ "google.generativeai.protos.GenerateTextRequest.wrap": true,
+ "google.generativeai.protos.GenerateTextResponse": false,
+ "google.generativeai.protos.GenerateTextResponse.__call__": true,
+ "google.generativeai.protos.GenerateTextResponse.__eq__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ge__": true,
+ "google.generativeai.protos.GenerateTextResponse.__gt__": true,
+ "google.generativeai.protos.GenerateTextResponse.__init__": true,
+ "google.generativeai.protos.GenerateTextResponse.__le__": true,
+ "google.generativeai.protos.GenerateTextResponse.__lt__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ne__": true,
+ "google.generativeai.protos.GenerateTextResponse.__new__": true,
+ "google.generativeai.protos.GenerateTextResponse.__or__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ror__": true,
+ "google.generativeai.protos.GenerateTextResponse.candidates": true,
+ "google.generativeai.protos.GenerateTextResponse.copy_from": true,
+ "google.generativeai.protos.GenerateTextResponse.deserialize": true,
+ "google.generativeai.protos.GenerateTextResponse.filters": true,
+ "google.generativeai.protos.GenerateTextResponse.from_json": true,
+ "google.generativeai.protos.GenerateTextResponse.mro": true,
+ "google.generativeai.protos.GenerateTextResponse.pb": true,
+ "google.generativeai.protos.GenerateTextResponse.safety_feedback": true,
+ "google.generativeai.protos.GenerateTextResponse.serialize": true,
+ "google.generativeai.protos.GenerateTextResponse.to_dict": true,
+ "google.generativeai.protos.GenerateTextResponse.to_json": true,
+ "google.generativeai.protos.GenerateTextResponse.wrap": true,
+ "google.generativeai.protos.GenerationConfig": false,
+ "google.generativeai.protos.GenerationConfig.__call__": true,
+ "google.generativeai.protos.GenerationConfig.__eq__": true,
+ "google.generativeai.protos.GenerationConfig.__ge__": true,
+ "google.generativeai.protos.GenerationConfig.__gt__": true,
+ "google.generativeai.protos.GenerationConfig.__init__": true,
+ "google.generativeai.protos.GenerationConfig.__le__": true,
+ "google.generativeai.protos.GenerationConfig.__lt__": true,
+ "google.generativeai.protos.GenerationConfig.__ne__": true,
+ "google.generativeai.protos.GenerationConfig.__new__": true,
+ "google.generativeai.protos.GenerationConfig.__or__": true,
+ "google.generativeai.protos.GenerationConfig.__ror__": true,
+ "google.generativeai.protos.GenerationConfig.candidate_count": true,
+ "google.generativeai.protos.GenerationConfig.copy_from": true,
+ "google.generativeai.protos.GenerationConfig.deserialize": true,
+ "google.generativeai.protos.GenerationConfig.from_json": true,
+ "google.generativeai.protos.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.protos.GenerationConfig.mro": true,
+ "google.generativeai.protos.GenerationConfig.pb": true,
+ "google.generativeai.protos.GenerationConfig.response_mime_type": true,
+ "google.generativeai.protos.GenerationConfig.response_schema": true,
+ "google.generativeai.protos.GenerationConfig.serialize": true,
+ "google.generativeai.protos.GenerationConfig.stop_sequences": true,
+ "google.generativeai.protos.GenerationConfig.temperature": true,
+ "google.generativeai.protos.GenerationConfig.to_dict": true,
+ "google.generativeai.protos.GenerationConfig.to_json": true,
+ "google.generativeai.protos.GenerationConfig.top_k": true,
+ "google.generativeai.protos.GenerationConfig.top_p": true,
+ "google.generativeai.protos.GenerationConfig.wrap": true,
+ "google.generativeai.protos.GetCachedContentRequest": false,
+ "google.generativeai.protos.GetCachedContentRequest.__call__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__init__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__le__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__new__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__or__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.GetCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.GetCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.GetCachedContentRequest.from_json": true,
+ "google.generativeai.protos.GetCachedContentRequest.mro": true,
+ "google.generativeai.protos.GetCachedContentRequest.name": true,
+ "google.generativeai.protos.GetCachedContentRequest.pb": true,
+ "google.generativeai.protos.GetCachedContentRequest.serialize": true,
+ "google.generativeai.protos.GetCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.GetCachedContentRequest.to_json": true,
+ "google.generativeai.protos.GetCachedContentRequest.wrap": true,
+ "google.generativeai.protos.GetChunkRequest": false,
+ "google.generativeai.protos.GetChunkRequest.__call__": true,
+ "google.generativeai.protos.GetChunkRequest.__eq__": true,
+ "google.generativeai.protos.GetChunkRequest.__ge__": true,
+ "google.generativeai.protos.GetChunkRequest.__gt__": true,
+ "google.generativeai.protos.GetChunkRequest.__init__": true,
+ "google.generativeai.protos.GetChunkRequest.__le__": true,
+ "google.generativeai.protos.GetChunkRequest.__lt__": true,
+ "google.generativeai.protos.GetChunkRequest.__ne__": true,
+ "google.generativeai.protos.GetChunkRequest.__new__": true,
+ "google.generativeai.protos.GetChunkRequest.__or__": true,
+ "google.generativeai.protos.GetChunkRequest.__ror__": true,
+ "google.generativeai.protos.GetChunkRequest.copy_from": true,
+ "google.generativeai.protos.GetChunkRequest.deserialize": true,
+ "google.generativeai.protos.GetChunkRequest.from_json": true,
+ "google.generativeai.protos.GetChunkRequest.mro": true,
+ "google.generativeai.protos.GetChunkRequest.name": true,
+ "google.generativeai.protos.GetChunkRequest.pb": true,
+ "google.generativeai.protos.GetChunkRequest.serialize": true,
+ "google.generativeai.protos.GetChunkRequest.to_dict": true,
+ "google.generativeai.protos.GetChunkRequest.to_json": true,
+ "google.generativeai.protos.GetChunkRequest.wrap": true,
+ "google.generativeai.protos.GetCorpusRequest": false,
+ "google.generativeai.protos.GetCorpusRequest.__call__": true,
+ "google.generativeai.protos.GetCorpusRequest.__eq__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ge__": true,
+ "google.generativeai.protos.GetCorpusRequest.__gt__": true,
+ "google.generativeai.protos.GetCorpusRequest.__init__": true,
+ "google.generativeai.protos.GetCorpusRequest.__le__": true,
+ "google.generativeai.protos.GetCorpusRequest.__lt__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ne__": true,
+ "google.generativeai.protos.GetCorpusRequest.__new__": true,
+ "google.generativeai.protos.GetCorpusRequest.__or__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ror__": true,
+ "google.generativeai.protos.GetCorpusRequest.copy_from": true,
+ "google.generativeai.protos.GetCorpusRequest.deserialize": true,
+ "google.generativeai.protos.GetCorpusRequest.from_json": true,
+ "google.generativeai.protos.GetCorpusRequest.mro": true,
+ "google.generativeai.protos.GetCorpusRequest.name": true,
+ "google.generativeai.protos.GetCorpusRequest.pb": true,
+ "google.generativeai.protos.GetCorpusRequest.serialize": true,
+ "google.generativeai.protos.GetCorpusRequest.to_dict": true,
+ "google.generativeai.protos.GetCorpusRequest.to_json": true,
+ "google.generativeai.protos.GetCorpusRequest.wrap": true,
+ "google.generativeai.protos.GetDocumentRequest": false,
+ "google.generativeai.protos.GetDocumentRequest.__call__": true,
+ "google.generativeai.protos.GetDocumentRequest.__eq__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ge__": true,
+ "google.generativeai.protos.GetDocumentRequest.__gt__": true,
+ "google.generativeai.protos.GetDocumentRequest.__init__": true,
+ "google.generativeai.protos.GetDocumentRequest.__le__": true,
+ "google.generativeai.protos.GetDocumentRequest.__lt__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ne__": true,
+ "google.generativeai.protos.GetDocumentRequest.__new__": true,
+ "google.generativeai.protos.GetDocumentRequest.__or__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ror__": true,
+ "google.generativeai.protos.GetDocumentRequest.copy_from": true,
+ "google.generativeai.protos.GetDocumentRequest.deserialize": true,
+ "google.generativeai.protos.GetDocumentRequest.from_json": true,
+ "google.generativeai.protos.GetDocumentRequest.mro": true,
+ "google.generativeai.protos.GetDocumentRequest.name": true,
+ "google.generativeai.protos.GetDocumentRequest.pb": true,
+ "google.generativeai.protos.GetDocumentRequest.serialize": true,
+ "google.generativeai.protos.GetDocumentRequest.to_dict": true,
+ "google.generativeai.protos.GetDocumentRequest.to_json": true,
+ "google.generativeai.protos.GetDocumentRequest.wrap": true,
+ "google.generativeai.protos.GetFileRequest": false,
+ "google.generativeai.protos.GetFileRequest.__call__": true,
+ "google.generativeai.protos.GetFileRequest.__eq__": true,
+ "google.generativeai.protos.GetFileRequest.__ge__": true,
+ "google.generativeai.protos.GetFileRequest.__gt__": true,
+ "google.generativeai.protos.GetFileRequest.__init__": true,
+ "google.generativeai.protos.GetFileRequest.__le__": true,
+ "google.generativeai.protos.GetFileRequest.__lt__": true,
+ "google.generativeai.protos.GetFileRequest.__ne__": true,
+ "google.generativeai.protos.GetFileRequest.__new__": true,
+ "google.generativeai.protos.GetFileRequest.__or__": true,
+ "google.generativeai.protos.GetFileRequest.__ror__": true,
+ "google.generativeai.protos.GetFileRequest.copy_from": true,
+ "google.generativeai.protos.GetFileRequest.deserialize": true,
+ "google.generativeai.protos.GetFileRequest.from_json": true,
+ "google.generativeai.protos.GetFileRequest.mro": true,
+ "google.generativeai.protos.GetFileRequest.name": true,
+ "google.generativeai.protos.GetFileRequest.pb": true,
+ "google.generativeai.protos.GetFileRequest.serialize": true,
+ "google.generativeai.protos.GetFileRequest.to_dict": true,
+ "google.generativeai.protos.GetFileRequest.to_json": true,
+ "google.generativeai.protos.GetFileRequest.wrap": true,
+ "google.generativeai.protos.GetModelRequest": false,
+ "google.generativeai.protos.GetModelRequest.__call__": true,
+ "google.generativeai.protos.GetModelRequest.__eq__": true,
+ "google.generativeai.protos.GetModelRequest.__ge__": true,
+ "google.generativeai.protos.GetModelRequest.__gt__": true,
+ "google.generativeai.protos.GetModelRequest.__init__": true,
+ "google.generativeai.protos.GetModelRequest.__le__": true,
+ "google.generativeai.protos.GetModelRequest.__lt__": true,
+ "google.generativeai.protos.GetModelRequest.__ne__": true,
+ "google.generativeai.protos.GetModelRequest.__new__": true,
+ "google.generativeai.protos.GetModelRequest.__or__": true,
+ "google.generativeai.protos.GetModelRequest.__ror__": true,
+ "google.generativeai.protos.GetModelRequest.copy_from": true,
+ "google.generativeai.protos.GetModelRequest.deserialize": true,
+ "google.generativeai.protos.GetModelRequest.from_json": true,
+ "google.generativeai.protos.GetModelRequest.mro": true,
+ "google.generativeai.protos.GetModelRequest.name": true,
+ "google.generativeai.protos.GetModelRequest.pb": true,
+ "google.generativeai.protos.GetModelRequest.serialize": true,
+ "google.generativeai.protos.GetModelRequest.to_dict": true,
+ "google.generativeai.protos.GetModelRequest.to_json": true,
+ "google.generativeai.protos.GetModelRequest.wrap": true,
+ "google.generativeai.protos.GetPermissionRequest": false,
+ "google.generativeai.protos.GetPermissionRequest.__call__": true,
+ "google.generativeai.protos.GetPermissionRequest.__eq__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ge__": true,
+ "google.generativeai.protos.GetPermissionRequest.__gt__": true,
+ "google.generativeai.protos.GetPermissionRequest.__init__": true,
+ "google.generativeai.protos.GetPermissionRequest.__le__": true,
+ "google.generativeai.protos.GetPermissionRequest.__lt__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ne__": true,
+ "google.generativeai.protos.GetPermissionRequest.__new__": true,
+ "google.generativeai.protos.GetPermissionRequest.__or__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ror__": true,
+ "google.generativeai.protos.GetPermissionRequest.copy_from": true,
+ "google.generativeai.protos.GetPermissionRequest.deserialize": true,
+ "google.generativeai.protos.GetPermissionRequest.from_json": true,
+ "google.generativeai.protos.GetPermissionRequest.mro": true,
+ "google.generativeai.protos.GetPermissionRequest.name": true,
+ "google.generativeai.protos.GetPermissionRequest.pb": true,
+ "google.generativeai.protos.GetPermissionRequest.serialize": true,
+ "google.generativeai.protos.GetPermissionRequest.to_dict": true,
+ "google.generativeai.protos.GetPermissionRequest.to_json": true,
+ "google.generativeai.protos.GetPermissionRequest.wrap": true,
+ "google.generativeai.protos.GetTunedModelRequest": false,
+ "google.generativeai.protos.GetTunedModelRequest.__call__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__init__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__le__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__new__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__or__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.GetTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.GetTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.GetTunedModelRequest.from_json": true,
+ "google.generativeai.protos.GetTunedModelRequest.mro": true,
+ "google.generativeai.protos.GetTunedModelRequest.name": true,
+ "google.generativeai.protos.GetTunedModelRequest.pb": true,
+ "google.generativeai.protos.GetTunedModelRequest.serialize": true,
+ "google.generativeai.protos.GetTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.GetTunedModelRequest.to_json": true,
+ "google.generativeai.protos.GetTunedModelRequest.wrap": true,
+ "google.generativeai.protos.GroundingAttribution": false,
+ "google.generativeai.protos.GroundingAttribution.__call__": true,
+ "google.generativeai.protos.GroundingAttribution.__eq__": true,
+ "google.generativeai.protos.GroundingAttribution.__ge__": true,
+ "google.generativeai.protos.GroundingAttribution.__gt__": true,
+ "google.generativeai.protos.GroundingAttribution.__init__": true,
+ "google.generativeai.protos.GroundingAttribution.__le__": true,
+ "google.generativeai.protos.GroundingAttribution.__lt__": true,
+ "google.generativeai.protos.GroundingAttribution.__ne__": true,
+ "google.generativeai.protos.GroundingAttribution.__new__": true,
+ "google.generativeai.protos.GroundingAttribution.__or__": true,
+ "google.generativeai.protos.GroundingAttribution.__ror__": true,
+ "google.generativeai.protos.GroundingAttribution.content": true,
+ "google.generativeai.protos.GroundingAttribution.copy_from": true,
+ "google.generativeai.protos.GroundingAttribution.deserialize": true,
+ "google.generativeai.protos.GroundingAttribution.from_json": true,
+ "google.generativeai.protos.GroundingAttribution.mro": true,
+ "google.generativeai.protos.GroundingAttribution.pb": true,
+ "google.generativeai.protos.GroundingAttribution.serialize": true,
+ "google.generativeai.protos.GroundingAttribution.source_id": true,
+ "google.generativeai.protos.GroundingAttribution.to_dict": true,
+ "google.generativeai.protos.GroundingAttribution.to_json": true,
+ "google.generativeai.protos.GroundingAttribution.wrap": true,
+ "google.generativeai.protos.GroundingPassage": false,
+ "google.generativeai.protos.GroundingPassage.__call__": true,
+ "google.generativeai.protos.GroundingPassage.__eq__": true,
+ "google.generativeai.protos.GroundingPassage.__ge__": true,
+ "google.generativeai.protos.GroundingPassage.__gt__": true,
+ "google.generativeai.protos.GroundingPassage.__init__": true,
+ "google.generativeai.protos.GroundingPassage.__le__": true,
+ "google.generativeai.protos.GroundingPassage.__lt__": true,
+ "google.generativeai.protos.GroundingPassage.__ne__": true,
+ "google.generativeai.protos.GroundingPassage.__new__": true,
+ "google.generativeai.protos.GroundingPassage.__or__": true,
+ "google.generativeai.protos.GroundingPassage.__ror__": true,
+ "google.generativeai.protos.GroundingPassage.content": true,
+ "google.generativeai.protos.GroundingPassage.copy_from": true,
+ "google.generativeai.protos.GroundingPassage.deserialize": true,
+ "google.generativeai.protos.GroundingPassage.from_json": true,
+ "google.generativeai.protos.GroundingPassage.id": true,
+ "google.generativeai.protos.GroundingPassage.mro": true,
+ "google.generativeai.protos.GroundingPassage.pb": true,
+ "google.generativeai.protos.GroundingPassage.serialize": true,
+ "google.generativeai.protos.GroundingPassage.to_dict": true,
+ "google.generativeai.protos.GroundingPassage.to_json": true,
+ "google.generativeai.protos.GroundingPassage.wrap": true,
+ "google.generativeai.protos.GroundingPassages": false,
+ "google.generativeai.protos.GroundingPassages.__call__": true,
+ "google.generativeai.protos.GroundingPassages.__eq__": true,
+ "google.generativeai.protos.GroundingPassages.__ge__": true,
+ "google.generativeai.protos.GroundingPassages.__gt__": true,
+ "google.generativeai.protos.GroundingPassages.__init__": true,
+ "google.generativeai.protos.GroundingPassages.__le__": true,
+ "google.generativeai.protos.GroundingPassages.__lt__": true,
+ "google.generativeai.protos.GroundingPassages.__ne__": true,
+ "google.generativeai.protos.GroundingPassages.__new__": true,
+ "google.generativeai.protos.GroundingPassages.__or__": true,
+ "google.generativeai.protos.GroundingPassages.__ror__": true,
+ "google.generativeai.protos.GroundingPassages.copy_from": true,
+ "google.generativeai.protos.GroundingPassages.deserialize": true,
+ "google.generativeai.protos.GroundingPassages.from_json": true,
+ "google.generativeai.protos.GroundingPassages.mro": true,
+ "google.generativeai.protos.GroundingPassages.passages": true,
+ "google.generativeai.protos.GroundingPassages.pb": true,
+ "google.generativeai.protos.GroundingPassages.serialize": true,
+ "google.generativeai.protos.GroundingPassages.to_dict": true,
+ "google.generativeai.protos.GroundingPassages.to_json": true,
+ "google.generativeai.protos.GroundingPassages.wrap": true,
+ "google.generativeai.protos.HarmCategory": false,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DEROGATORY": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_MEDICAL": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUAL": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_TOXICITY": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_VIOLENCE": true,
+ "google.generativeai.protos.HarmCategory.__abs__": true,
+ "google.generativeai.protos.HarmCategory.__add__": true,
+ "google.generativeai.protos.HarmCategory.__and__": true,
+ "google.generativeai.protos.HarmCategory.__bool__": true,
+ "google.generativeai.protos.HarmCategory.__contains__": true,
+ "google.generativeai.protos.HarmCategory.__eq__": true,
+ "google.generativeai.protos.HarmCategory.__floordiv__": true,
+ "google.generativeai.protos.HarmCategory.__ge__": true,
+ "google.generativeai.protos.HarmCategory.__getitem__": true,
+ "google.generativeai.protos.HarmCategory.__gt__": true,
+ "google.generativeai.protos.HarmCategory.__init__": true,
+ "google.generativeai.protos.HarmCategory.__invert__": true,
+ "google.generativeai.protos.HarmCategory.__iter__": true,
+ "google.generativeai.protos.HarmCategory.__le__": true,
+ "google.generativeai.protos.HarmCategory.__len__": true,
+ "google.generativeai.protos.HarmCategory.__lshift__": true,
+ "google.generativeai.protos.HarmCategory.__lt__": true,
+ "google.generativeai.protos.HarmCategory.__mod__": true,
+ "google.generativeai.protos.HarmCategory.__mul__": true,
+ "google.generativeai.protos.HarmCategory.__ne__": true,
+ "google.generativeai.protos.HarmCategory.__neg__": true,
+ "google.generativeai.protos.HarmCategory.__new__": true,
+ "google.generativeai.protos.HarmCategory.__or__": true,
+ "google.generativeai.protos.HarmCategory.__pos__": true,
+ "google.generativeai.protos.HarmCategory.__pow__": true,
+ "google.generativeai.protos.HarmCategory.__radd__": true,
+ "google.generativeai.protos.HarmCategory.__rand__": true,
+ "google.generativeai.protos.HarmCategory.__rfloordiv__": true,
+ "google.generativeai.protos.HarmCategory.__rlshift__": true,
+ "google.generativeai.protos.HarmCategory.__rmod__": true,
+ "google.generativeai.protos.HarmCategory.__rmul__": true,
+ "google.generativeai.protos.HarmCategory.__ror__": true,
+ "google.generativeai.protos.HarmCategory.__rpow__": true,
+ "google.generativeai.protos.HarmCategory.__rrshift__": true,
+ "google.generativeai.protos.HarmCategory.__rshift__": true,
+ "google.generativeai.protos.HarmCategory.__rsub__": true,
+ "google.generativeai.protos.HarmCategory.__rtruediv__": true,
+ "google.generativeai.protos.HarmCategory.__rxor__": true,
+ "google.generativeai.protos.HarmCategory.__sub__": true,
+ "google.generativeai.protos.HarmCategory.__truediv__": true,
+ "google.generativeai.protos.HarmCategory.__xor__": true,
+ "google.generativeai.protos.HarmCategory.as_integer_ratio": true,
+ "google.generativeai.protos.HarmCategory.bit_count": true,
+ "google.generativeai.protos.HarmCategory.bit_length": true,
+ "google.generativeai.protos.HarmCategory.conjugate": true,
+ "google.generativeai.protos.HarmCategory.denominator": true,
+ "google.generativeai.protos.HarmCategory.from_bytes": true,
+ "google.generativeai.protos.HarmCategory.imag": true,
+ "google.generativeai.protos.HarmCategory.numerator": true,
+ "google.generativeai.protos.HarmCategory.real": true,
+ "google.generativeai.protos.HarmCategory.to_bytes": true,
+ "google.generativeai.protos.Hyperparameters": false,
+ "google.generativeai.protos.Hyperparameters.__call__": true,
+ "google.generativeai.protos.Hyperparameters.__eq__": true,
+ "google.generativeai.protos.Hyperparameters.__ge__": true,
+ "google.generativeai.protos.Hyperparameters.__gt__": true,
+ "google.generativeai.protos.Hyperparameters.__init__": true,
+ "google.generativeai.protos.Hyperparameters.__le__": true,
+ "google.generativeai.protos.Hyperparameters.__lt__": true,
+ "google.generativeai.protos.Hyperparameters.__ne__": true,
+ "google.generativeai.protos.Hyperparameters.__new__": true,
+ "google.generativeai.protos.Hyperparameters.__or__": true,
+ "google.generativeai.protos.Hyperparameters.__ror__": true,
+ "google.generativeai.protos.Hyperparameters.batch_size": true,
+ "google.generativeai.protos.Hyperparameters.copy_from": true,
+ "google.generativeai.protos.Hyperparameters.deserialize": true,
+ "google.generativeai.protos.Hyperparameters.epoch_count": true,
+ "google.generativeai.protos.Hyperparameters.from_json": true,
+ "google.generativeai.protos.Hyperparameters.learning_rate": true,
+ "google.generativeai.protos.Hyperparameters.learning_rate_multiplier": true,
+ "google.generativeai.protos.Hyperparameters.mro": true,
+ "google.generativeai.protos.Hyperparameters.pb": true,
+ "google.generativeai.protos.Hyperparameters.serialize": true,
+ "google.generativeai.protos.Hyperparameters.to_dict": true,
+ "google.generativeai.protos.Hyperparameters.to_json": true,
+ "google.generativeai.protos.Hyperparameters.wrap": true,
+ "google.generativeai.protos.ListCachedContentsRequest": false,
+ "google.generativeai.protos.ListCachedContentsRequest.__call__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__eq__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ge__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__gt__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__init__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__le__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__lt__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ne__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__new__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__or__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ror__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.copy_from": true,
+ "google.generativeai.protos.ListCachedContentsRequest.deserialize": true,
+ "google.generativeai.protos.ListCachedContentsRequest.from_json": true,
+ "google.generativeai.protos.ListCachedContentsRequest.mro": true,
+ "google.generativeai.protos.ListCachedContentsRequest.page_size": true,
+ "google.generativeai.protos.ListCachedContentsRequest.page_token": true,
+ "google.generativeai.protos.ListCachedContentsRequest.pb": true,
+ "google.generativeai.protos.ListCachedContentsRequest.serialize": true,
+ "google.generativeai.protos.ListCachedContentsRequest.to_dict": true,
+ "google.generativeai.protos.ListCachedContentsRequest.to_json": true,
+ "google.generativeai.protos.ListCachedContentsRequest.wrap": true,
+ "google.generativeai.protos.ListCachedContentsResponse": false,
+ "google.generativeai.protos.ListCachedContentsResponse.__call__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__eq__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ge__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__gt__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__init__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__le__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__lt__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ne__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__new__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__or__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ror__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.cached_contents": true,
+ "google.generativeai.protos.ListCachedContentsResponse.copy_from": true,
+ "google.generativeai.protos.ListCachedContentsResponse.deserialize": true,
+ "google.generativeai.protos.ListCachedContentsResponse.from_json": true,
+ "google.generativeai.protos.ListCachedContentsResponse.mro": true,
+ "google.generativeai.protos.ListCachedContentsResponse.next_page_token": true,
+ "google.generativeai.protos.ListCachedContentsResponse.pb": true,
+ "google.generativeai.protos.ListCachedContentsResponse.serialize": true,
+ "google.generativeai.protos.ListCachedContentsResponse.to_dict": true,
+ "google.generativeai.protos.ListCachedContentsResponse.to_json": true,
+ "google.generativeai.protos.ListCachedContentsResponse.wrap": true,
+ "google.generativeai.protos.ListChunksRequest": false,
+ "google.generativeai.protos.ListChunksRequest.__call__": true,
+ "google.generativeai.protos.ListChunksRequest.__eq__": true,
+ "google.generativeai.protos.ListChunksRequest.__ge__": true,
+ "google.generativeai.protos.ListChunksRequest.__gt__": true,
+ "google.generativeai.protos.ListChunksRequest.__init__": true,
+ "google.generativeai.protos.ListChunksRequest.__le__": true,
+ "google.generativeai.protos.ListChunksRequest.__lt__": true,
+ "google.generativeai.protos.ListChunksRequest.__ne__": true,
+ "google.generativeai.protos.ListChunksRequest.__new__": true,
+ "google.generativeai.protos.ListChunksRequest.__or__": true,
+ "google.generativeai.protos.ListChunksRequest.__ror__": true,
+ "google.generativeai.protos.ListChunksRequest.copy_from": true,
+ "google.generativeai.protos.ListChunksRequest.deserialize": true,
+ "google.generativeai.protos.ListChunksRequest.from_json": true,
+ "google.generativeai.protos.ListChunksRequest.mro": true,
+ "google.generativeai.protos.ListChunksRequest.page_size": true,
+ "google.generativeai.protos.ListChunksRequest.page_token": true,
+ "google.generativeai.protos.ListChunksRequest.parent": true,
+ "google.generativeai.protos.ListChunksRequest.pb": true,
+ "google.generativeai.protos.ListChunksRequest.serialize": true,
+ "google.generativeai.protos.ListChunksRequest.to_dict": true,
+ "google.generativeai.protos.ListChunksRequest.to_json": true,
+ "google.generativeai.protos.ListChunksRequest.wrap": true,
+ "google.generativeai.protos.ListChunksResponse": false,
+ "google.generativeai.protos.ListChunksResponse.__call__": true,
+ "google.generativeai.protos.ListChunksResponse.__eq__": true,
+ "google.generativeai.protos.ListChunksResponse.__ge__": true,
+ "google.generativeai.protos.ListChunksResponse.__gt__": true,
+ "google.generativeai.protos.ListChunksResponse.__init__": true,
+ "google.generativeai.protos.ListChunksResponse.__le__": true,
+ "google.generativeai.protos.ListChunksResponse.__lt__": true,
+ "google.generativeai.protos.ListChunksResponse.__ne__": true,
+ "google.generativeai.protos.ListChunksResponse.__new__": true,
+ "google.generativeai.protos.ListChunksResponse.__or__": true,
+ "google.generativeai.protos.ListChunksResponse.__ror__": true,
+ "google.generativeai.protos.ListChunksResponse.chunks": true,
+ "google.generativeai.protos.ListChunksResponse.copy_from": true,
+ "google.generativeai.protos.ListChunksResponse.deserialize": true,
+ "google.generativeai.protos.ListChunksResponse.from_json": true,
+ "google.generativeai.protos.ListChunksResponse.mro": true,
+ "google.generativeai.protos.ListChunksResponse.next_page_token": true,
+ "google.generativeai.protos.ListChunksResponse.pb": true,
+ "google.generativeai.protos.ListChunksResponse.serialize": true,
+ "google.generativeai.protos.ListChunksResponse.to_dict": true,
+ "google.generativeai.protos.ListChunksResponse.to_json": true,
+ "google.generativeai.protos.ListChunksResponse.wrap": true,
+ "google.generativeai.protos.ListCorporaRequest": false,
+ "google.generativeai.protos.ListCorporaRequest.__call__": true,
+ "google.generativeai.protos.ListCorporaRequest.__eq__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ge__": true,
+ "google.generativeai.protos.ListCorporaRequest.__gt__": true,
+ "google.generativeai.protos.ListCorporaRequest.__init__": true,
+ "google.generativeai.protos.ListCorporaRequest.__le__": true,
+ "google.generativeai.protos.ListCorporaRequest.__lt__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ne__": true,
+ "google.generativeai.protos.ListCorporaRequest.__new__": true,
+ "google.generativeai.protos.ListCorporaRequest.__or__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ror__": true,
+ "google.generativeai.protos.ListCorporaRequest.copy_from": true,
+ "google.generativeai.protos.ListCorporaRequest.deserialize": true,
+ "google.generativeai.protos.ListCorporaRequest.from_json": true,
+ "google.generativeai.protos.ListCorporaRequest.mro": true,
+ "google.generativeai.protos.ListCorporaRequest.page_size": true,
+ "google.generativeai.protos.ListCorporaRequest.page_token": true,
+ "google.generativeai.protos.ListCorporaRequest.pb": true,
+ "google.generativeai.protos.ListCorporaRequest.serialize": true,
+ "google.generativeai.protos.ListCorporaRequest.to_dict": true,
+ "google.generativeai.protos.ListCorporaRequest.to_json": true,
+ "google.generativeai.protos.ListCorporaRequest.wrap": true,
+ "google.generativeai.protos.ListCorporaResponse": false,
+ "google.generativeai.protos.ListCorporaResponse.__call__": true,
+ "google.generativeai.protos.ListCorporaResponse.__eq__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ge__": true,
+ "google.generativeai.protos.ListCorporaResponse.__gt__": true,
+ "google.generativeai.protos.ListCorporaResponse.__init__": true,
+ "google.generativeai.protos.ListCorporaResponse.__le__": true,
+ "google.generativeai.protos.ListCorporaResponse.__lt__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ne__": true,
+ "google.generativeai.protos.ListCorporaResponse.__new__": true,
+ "google.generativeai.protos.ListCorporaResponse.__or__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ror__": true,
+ "google.generativeai.protos.ListCorporaResponse.copy_from": true,
+ "google.generativeai.protos.ListCorporaResponse.corpora": true,
+ "google.generativeai.protos.ListCorporaResponse.deserialize": true,
+ "google.generativeai.protos.ListCorporaResponse.from_json": true,
+ "google.generativeai.protos.ListCorporaResponse.mro": true,
+ "google.generativeai.protos.ListCorporaResponse.next_page_token": true,
+ "google.generativeai.protos.ListCorporaResponse.pb": true,
+ "google.generativeai.protos.ListCorporaResponse.serialize": true,
+ "google.generativeai.protos.ListCorporaResponse.to_dict": true,
+ "google.generativeai.protos.ListCorporaResponse.to_json": true,
+ "google.generativeai.protos.ListCorporaResponse.wrap": true,
+ "google.generativeai.protos.ListDocumentsRequest": false,
+ "google.generativeai.protos.ListDocumentsRequest.__call__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__eq__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ge__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__gt__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__init__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__le__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__lt__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ne__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__new__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__or__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ror__": true,
+ "google.generativeai.protos.ListDocumentsRequest.copy_from": true,
+ "google.generativeai.protos.ListDocumentsRequest.deserialize": true,
+ "google.generativeai.protos.ListDocumentsRequest.from_json": true,
+ "google.generativeai.protos.ListDocumentsRequest.mro": true,
+ "google.generativeai.protos.ListDocumentsRequest.page_size": true,
+ "google.generativeai.protos.ListDocumentsRequest.page_token": true,
+ "google.generativeai.protos.ListDocumentsRequest.parent": true,
+ "google.generativeai.protos.ListDocumentsRequest.pb": true,
+ "google.generativeai.protos.ListDocumentsRequest.serialize": true,
+ "google.generativeai.protos.ListDocumentsRequest.to_dict": true,
+ "google.generativeai.protos.ListDocumentsRequest.to_json": true,
+ "google.generativeai.protos.ListDocumentsRequest.wrap": true,
+ "google.generativeai.protos.ListDocumentsResponse": false,
+ "google.generativeai.protos.ListDocumentsResponse.__call__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__eq__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ge__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__gt__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__init__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__le__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__lt__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ne__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__new__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__or__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ror__": true,
+ "google.generativeai.protos.ListDocumentsResponse.copy_from": true,
+ "google.generativeai.protos.ListDocumentsResponse.deserialize": true,
+ "google.generativeai.protos.ListDocumentsResponse.documents": true,
+ "google.generativeai.protos.ListDocumentsResponse.from_json": true,
+ "google.generativeai.protos.ListDocumentsResponse.mro": true,
+ "google.generativeai.protos.ListDocumentsResponse.next_page_token": true,
+ "google.generativeai.protos.ListDocumentsResponse.pb": true,
+ "google.generativeai.protos.ListDocumentsResponse.serialize": true,
+ "google.generativeai.protos.ListDocumentsResponse.to_dict": true,
+ "google.generativeai.protos.ListDocumentsResponse.to_json": true,
+ "google.generativeai.protos.ListDocumentsResponse.wrap": true,
+ "google.generativeai.protos.ListFilesRequest": false,
+ "google.generativeai.protos.ListFilesRequest.__call__": true,
+ "google.generativeai.protos.ListFilesRequest.__eq__": true,
+ "google.generativeai.protos.ListFilesRequest.__ge__": true,
+ "google.generativeai.protos.ListFilesRequest.__gt__": true,
+ "google.generativeai.protos.ListFilesRequest.__init__": true,
+ "google.generativeai.protos.ListFilesRequest.__le__": true,
+ "google.generativeai.protos.ListFilesRequest.__lt__": true,
+ "google.generativeai.protos.ListFilesRequest.__ne__": true,
+ "google.generativeai.protos.ListFilesRequest.__new__": true,
+ "google.generativeai.protos.ListFilesRequest.__or__": true,
+ "google.generativeai.protos.ListFilesRequest.__ror__": true,
+ "google.generativeai.protos.ListFilesRequest.copy_from": true,
+ "google.generativeai.protos.ListFilesRequest.deserialize": true,
+ "google.generativeai.protos.ListFilesRequest.from_json": true,
+ "google.generativeai.protos.ListFilesRequest.mro": true,
+ "google.generativeai.protos.ListFilesRequest.page_size": true,
+ "google.generativeai.protos.ListFilesRequest.page_token": true,
+ "google.generativeai.protos.ListFilesRequest.pb": true,
+ "google.generativeai.protos.ListFilesRequest.serialize": true,
+ "google.generativeai.protos.ListFilesRequest.to_dict": true,
+ "google.generativeai.protos.ListFilesRequest.to_json": true,
+ "google.generativeai.protos.ListFilesRequest.wrap": true,
+ "google.generativeai.protos.ListFilesResponse": false,
+ "google.generativeai.protos.ListFilesResponse.__call__": true,
+ "google.generativeai.protos.ListFilesResponse.__eq__": true,
+ "google.generativeai.protos.ListFilesResponse.__ge__": true,
+ "google.generativeai.protos.ListFilesResponse.__gt__": true,
+ "google.generativeai.protos.ListFilesResponse.__init__": true,
+ "google.generativeai.protos.ListFilesResponse.__le__": true,
+ "google.generativeai.protos.ListFilesResponse.__lt__": true,
+ "google.generativeai.protos.ListFilesResponse.__ne__": true,
+ "google.generativeai.protos.ListFilesResponse.__new__": true,
+ "google.generativeai.protos.ListFilesResponse.__or__": true,
+ "google.generativeai.protos.ListFilesResponse.__ror__": true,
+ "google.generativeai.protos.ListFilesResponse.copy_from": true,
+ "google.generativeai.protos.ListFilesResponse.deserialize": true,
+ "google.generativeai.protos.ListFilesResponse.files": true,
+ "google.generativeai.protos.ListFilesResponse.from_json": true,
+ "google.generativeai.protos.ListFilesResponse.mro": true,
+ "google.generativeai.protos.ListFilesResponse.next_page_token": true,
+ "google.generativeai.protos.ListFilesResponse.pb": true,
+ "google.generativeai.protos.ListFilesResponse.serialize": true,
+ "google.generativeai.protos.ListFilesResponse.to_dict": true,
+ "google.generativeai.protos.ListFilesResponse.to_json": true,
+ "google.generativeai.protos.ListFilesResponse.wrap": true,
+ "google.generativeai.protos.ListModelsRequest": false,
+ "google.generativeai.protos.ListModelsRequest.__call__": true,
+ "google.generativeai.protos.ListModelsRequest.__eq__": true,
+ "google.generativeai.protos.ListModelsRequest.__ge__": true,
+ "google.generativeai.protos.ListModelsRequest.__gt__": true,
+ "google.generativeai.protos.ListModelsRequest.__init__": true,
+ "google.generativeai.protos.ListModelsRequest.__le__": true,
+ "google.generativeai.protos.ListModelsRequest.__lt__": true,
+ "google.generativeai.protos.ListModelsRequest.__ne__": true,
+ "google.generativeai.protos.ListModelsRequest.__new__": true,
+ "google.generativeai.protos.ListModelsRequest.__or__": true,
+ "google.generativeai.protos.ListModelsRequest.__ror__": true,
+ "google.generativeai.protos.ListModelsRequest.copy_from": true,
+ "google.generativeai.protos.ListModelsRequest.deserialize": true,
+ "google.generativeai.protos.ListModelsRequest.from_json": true,
+ "google.generativeai.protos.ListModelsRequest.mro": true,
+ "google.generativeai.protos.ListModelsRequest.page_size": true,
+ "google.generativeai.protos.ListModelsRequest.page_token": true,
+ "google.generativeai.protos.ListModelsRequest.pb": true,
+ "google.generativeai.protos.ListModelsRequest.serialize": true,
+ "google.generativeai.protos.ListModelsRequest.to_dict": true,
+ "google.generativeai.protos.ListModelsRequest.to_json": true,
+ "google.generativeai.protos.ListModelsRequest.wrap": true,
+ "google.generativeai.protos.ListModelsResponse": false,
+ "google.generativeai.protos.ListModelsResponse.__call__": true,
+ "google.generativeai.protos.ListModelsResponse.__eq__": true,
+ "google.generativeai.protos.ListModelsResponse.__ge__": true,
+ "google.generativeai.protos.ListModelsResponse.__gt__": true,
+ "google.generativeai.protos.ListModelsResponse.__init__": true,
+ "google.generativeai.protos.ListModelsResponse.__le__": true,
+ "google.generativeai.protos.ListModelsResponse.__lt__": true,
+ "google.generativeai.protos.ListModelsResponse.__ne__": true,
+ "google.generativeai.protos.ListModelsResponse.__new__": true,
+ "google.generativeai.protos.ListModelsResponse.__or__": true,
+ "google.generativeai.protos.ListModelsResponse.__ror__": true,
+ "google.generativeai.protos.ListModelsResponse.copy_from": true,
+ "google.generativeai.protos.ListModelsResponse.deserialize": true,
+ "google.generativeai.protos.ListModelsResponse.from_json": true,
+ "google.generativeai.protos.ListModelsResponse.models": true,
+ "google.generativeai.protos.ListModelsResponse.mro": true,
+ "google.generativeai.protos.ListModelsResponse.next_page_token": true,
+ "google.generativeai.protos.ListModelsResponse.pb": true,
+ "google.generativeai.protos.ListModelsResponse.serialize": true,
+ "google.generativeai.protos.ListModelsResponse.to_dict": true,
+ "google.generativeai.protos.ListModelsResponse.to_json": true,
+ "google.generativeai.protos.ListModelsResponse.wrap": true,
+ "google.generativeai.protos.ListPermissionsRequest": false,
+ "google.generativeai.protos.ListPermissionsRequest.__call__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__eq__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ge__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__gt__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__init__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__le__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__lt__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ne__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__new__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__or__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ror__": true,
+ "google.generativeai.protos.ListPermissionsRequest.copy_from": true,
+ "google.generativeai.protos.ListPermissionsRequest.deserialize": true,
+ "google.generativeai.protos.ListPermissionsRequest.from_json": true,
+ "google.generativeai.protos.ListPermissionsRequest.mro": true,
+ "google.generativeai.protos.ListPermissionsRequest.page_size": true,
+ "google.generativeai.protos.ListPermissionsRequest.page_token": true,
+ "google.generativeai.protos.ListPermissionsRequest.parent": true,
+ "google.generativeai.protos.ListPermissionsRequest.pb": true,
+ "google.generativeai.protos.ListPermissionsRequest.serialize": true,
+ "google.generativeai.protos.ListPermissionsRequest.to_dict": true,
+ "google.generativeai.protos.ListPermissionsRequest.to_json": true,
+ "google.generativeai.protos.ListPermissionsRequest.wrap": true,
+ "google.generativeai.protos.ListPermissionsResponse": false,
+ "google.generativeai.protos.ListPermissionsResponse.__call__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__eq__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ge__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__gt__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__init__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__le__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__lt__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ne__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__new__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__or__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ror__": true,
+ "google.generativeai.protos.ListPermissionsResponse.copy_from": true,
+ "google.generativeai.protos.ListPermissionsResponse.deserialize": true,
+ "google.generativeai.protos.ListPermissionsResponse.from_json": true,
+ "google.generativeai.protos.ListPermissionsResponse.mro": true,
+ "google.generativeai.protos.ListPermissionsResponse.next_page_token": true,
+ "google.generativeai.protos.ListPermissionsResponse.pb": true,
+ "google.generativeai.protos.ListPermissionsResponse.permissions": true,
+ "google.generativeai.protos.ListPermissionsResponse.serialize": true,
+ "google.generativeai.protos.ListPermissionsResponse.to_dict": true,
+ "google.generativeai.protos.ListPermissionsResponse.to_json": true,
+ "google.generativeai.protos.ListPermissionsResponse.wrap": true,
+ "google.generativeai.protos.ListTunedModelsRequest": false,
+ "google.generativeai.protos.ListTunedModelsRequest.__call__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__eq__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ge__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__gt__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__init__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__le__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__lt__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ne__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__new__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__or__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ror__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.copy_from": true,
+ "google.generativeai.protos.ListTunedModelsRequest.deserialize": true,
+ "google.generativeai.protos.ListTunedModelsRequest.filter": true,
+ "google.generativeai.protos.ListTunedModelsRequest.from_json": true,
+ "google.generativeai.protos.ListTunedModelsRequest.mro": true,
+ "google.generativeai.protos.ListTunedModelsRequest.page_size": true,
+ "google.generativeai.protos.ListTunedModelsRequest.page_token": true,
+ "google.generativeai.protos.ListTunedModelsRequest.pb": true,
+ "google.generativeai.protos.ListTunedModelsRequest.serialize": true,
+ "google.generativeai.protos.ListTunedModelsRequest.to_dict": true,
+ "google.generativeai.protos.ListTunedModelsRequest.to_json": true,
+ "google.generativeai.protos.ListTunedModelsRequest.wrap": true,
+ "google.generativeai.protos.ListTunedModelsResponse": false,
+ "google.generativeai.protos.ListTunedModelsResponse.__call__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__eq__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ge__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__gt__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__init__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__le__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__lt__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ne__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__new__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__or__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ror__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.copy_from": true,
+ "google.generativeai.protos.ListTunedModelsResponse.deserialize": true,
+ "google.generativeai.protos.ListTunedModelsResponse.from_json": true,
+ "google.generativeai.protos.ListTunedModelsResponse.mro": true,
+ "google.generativeai.protos.ListTunedModelsResponse.next_page_token": true,
+ "google.generativeai.protos.ListTunedModelsResponse.pb": true,
+ "google.generativeai.protos.ListTunedModelsResponse.serialize": true,
+ "google.generativeai.protos.ListTunedModelsResponse.to_dict": true,
+ "google.generativeai.protos.ListTunedModelsResponse.to_json": true,
+ "google.generativeai.protos.ListTunedModelsResponse.tuned_models": true,
+ "google.generativeai.protos.ListTunedModelsResponse.wrap": true,
+ "google.generativeai.protos.Message": false,
+ "google.generativeai.protos.Message.__call__": true,
+ "google.generativeai.protos.Message.__eq__": true,
+ "google.generativeai.protos.Message.__ge__": true,
+ "google.generativeai.protos.Message.__gt__": true,
+ "google.generativeai.protos.Message.__init__": true,
+ "google.generativeai.protos.Message.__le__": true,
+ "google.generativeai.protos.Message.__lt__": true,
+ "google.generativeai.protos.Message.__ne__": true,
+ "google.generativeai.protos.Message.__new__": true,
+ "google.generativeai.protos.Message.__or__": true,
+ "google.generativeai.protos.Message.__ror__": true,
+ "google.generativeai.protos.Message.author": true,
+ "google.generativeai.protos.Message.citation_metadata": true,
+ "google.generativeai.protos.Message.content": true,
+ "google.generativeai.protos.Message.copy_from": true,
+ "google.generativeai.protos.Message.deserialize": true,
+ "google.generativeai.protos.Message.from_json": true,
+ "google.generativeai.protos.Message.mro": true,
+ "google.generativeai.protos.Message.pb": true,
+ "google.generativeai.protos.Message.serialize": true,
+ "google.generativeai.protos.Message.to_dict": true,
+ "google.generativeai.protos.Message.to_json": true,
+ "google.generativeai.protos.Message.wrap": true,
+ "google.generativeai.protos.MessagePrompt": false,
+ "google.generativeai.protos.MessagePrompt.__call__": true,
+ "google.generativeai.protos.MessagePrompt.__eq__": true,
+ "google.generativeai.protos.MessagePrompt.__ge__": true,
+ "google.generativeai.protos.MessagePrompt.__gt__": true,
+ "google.generativeai.protos.MessagePrompt.__init__": true,
+ "google.generativeai.protos.MessagePrompt.__le__": true,
+ "google.generativeai.protos.MessagePrompt.__lt__": true,
+ "google.generativeai.protos.MessagePrompt.__ne__": true,
+ "google.generativeai.protos.MessagePrompt.__new__": true,
+ "google.generativeai.protos.MessagePrompt.__or__": true,
+ "google.generativeai.protos.MessagePrompt.__ror__": true,
+ "google.generativeai.protos.MessagePrompt.context": true,
+ "google.generativeai.protos.MessagePrompt.copy_from": true,
+ "google.generativeai.protos.MessagePrompt.deserialize": true,
+ "google.generativeai.protos.MessagePrompt.examples": true,
+ "google.generativeai.protos.MessagePrompt.from_json": true,
+ "google.generativeai.protos.MessagePrompt.messages": true,
+ "google.generativeai.protos.MessagePrompt.mro": true,
+ "google.generativeai.protos.MessagePrompt.pb": true,
+ "google.generativeai.protos.MessagePrompt.serialize": true,
+ "google.generativeai.protos.MessagePrompt.to_dict": true,
+ "google.generativeai.protos.MessagePrompt.to_json": true,
+ "google.generativeai.protos.MessagePrompt.wrap": true,
+ "google.generativeai.protos.MetadataFilter": false,
+ "google.generativeai.protos.MetadataFilter.__call__": true,
+ "google.generativeai.protos.MetadataFilter.__eq__": true,
+ "google.generativeai.protos.MetadataFilter.__ge__": true,
+ "google.generativeai.protos.MetadataFilter.__gt__": true,
+ "google.generativeai.protos.MetadataFilter.__init__": true,
+ "google.generativeai.protos.MetadataFilter.__le__": true,
+ "google.generativeai.protos.MetadataFilter.__lt__": true,
+ "google.generativeai.protos.MetadataFilter.__ne__": true,
+ "google.generativeai.protos.MetadataFilter.__new__": true,
+ "google.generativeai.protos.MetadataFilter.__or__": true,
+ "google.generativeai.protos.MetadataFilter.__ror__": true,
+ "google.generativeai.protos.MetadataFilter.conditions": true,
+ "google.generativeai.protos.MetadataFilter.copy_from": true,
+ "google.generativeai.protos.MetadataFilter.deserialize": true,
+ "google.generativeai.protos.MetadataFilter.from_json": true,
+ "google.generativeai.protos.MetadataFilter.key": true,
+ "google.generativeai.protos.MetadataFilter.mro": true,
+ "google.generativeai.protos.MetadataFilter.pb": true,
+ "google.generativeai.protos.MetadataFilter.serialize": true,
+ "google.generativeai.protos.MetadataFilter.to_dict": true,
+ "google.generativeai.protos.MetadataFilter.to_json": true,
+ "google.generativeai.protos.MetadataFilter.wrap": true,
+ "google.generativeai.protos.Model": false,
+ "google.generativeai.protos.Model.__call__": true,
+ "google.generativeai.protos.Model.__eq__": true,
+ "google.generativeai.protos.Model.__ge__": true,
+ "google.generativeai.protos.Model.__gt__": true,
+ "google.generativeai.protos.Model.__init__": true,
+ "google.generativeai.protos.Model.__le__": true,
+ "google.generativeai.protos.Model.__lt__": true,
+ "google.generativeai.protos.Model.__ne__": true,
+ "google.generativeai.protos.Model.__new__": true,
+ "google.generativeai.protos.Model.__or__": true,
+ "google.generativeai.protos.Model.__ror__": true,
+ "google.generativeai.protos.Model.base_model_id": true,
+ "google.generativeai.protos.Model.copy_from": true,
+ "google.generativeai.protos.Model.description": true,
+ "google.generativeai.protos.Model.deserialize": true,
+ "google.generativeai.protos.Model.display_name": true,
+ "google.generativeai.protos.Model.from_json": true,
+ "google.generativeai.protos.Model.input_token_limit": true,
+ "google.generativeai.protos.Model.max_temperature": true,
+ "google.generativeai.protos.Model.mro": true,
+ "google.generativeai.protos.Model.name": true,
+ "google.generativeai.protos.Model.output_token_limit": true,
+ "google.generativeai.protos.Model.pb": true,
+ "google.generativeai.protos.Model.serialize": true,
+ "google.generativeai.protos.Model.supported_generation_methods": true,
+ "google.generativeai.protos.Model.temperature": true,
+ "google.generativeai.protos.Model.to_dict": true,
+ "google.generativeai.protos.Model.to_json": true,
+ "google.generativeai.protos.Model.top_k": true,
+ "google.generativeai.protos.Model.top_p": true,
+ "google.generativeai.protos.Model.version": true,
+ "google.generativeai.protos.Model.wrap": true,
+ "google.generativeai.protos.Part": false,
+ "google.generativeai.protos.Part.__call__": true,
+ "google.generativeai.protos.Part.__eq__": true,
+ "google.generativeai.protos.Part.__ge__": true,
+ "google.generativeai.protos.Part.__gt__": true,
+ "google.generativeai.protos.Part.__init__": true,
+ "google.generativeai.protos.Part.__le__": true,
+ "google.generativeai.protos.Part.__lt__": true,
+ "google.generativeai.protos.Part.__ne__": true,
+ "google.generativeai.protos.Part.__new__": true,
+ "google.generativeai.protos.Part.__or__": true,
+ "google.generativeai.protos.Part.__ror__": true,
+ "google.generativeai.protos.Part.code_execution_result": true,
+ "google.generativeai.protos.Part.copy_from": true,
+ "google.generativeai.protos.Part.deserialize": true,
+ "google.generativeai.protos.Part.executable_code": true,
+ "google.generativeai.protos.Part.file_data": true,
+ "google.generativeai.protos.Part.from_json": true,
+ "google.generativeai.protos.Part.function_call": true,
+ "google.generativeai.protos.Part.function_response": true,
+ "google.generativeai.protos.Part.inline_data": true,
+ "google.generativeai.protos.Part.mro": true,
+ "google.generativeai.protos.Part.pb": true,
+ "google.generativeai.protos.Part.serialize": true,
+ "google.generativeai.protos.Part.text": true,
+ "google.generativeai.protos.Part.to_dict": true,
+ "google.generativeai.protos.Part.to_json": true,
+ "google.generativeai.protos.Part.wrap": true,
+ "google.generativeai.protos.Permission": false,
+ "google.generativeai.protos.Permission.GranteeType": false,
+ "google.generativeai.protos.Permission.GranteeType.EVERYONE": true,
+ "google.generativeai.protos.Permission.GranteeType.GRANTEE_TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.Permission.GranteeType.GROUP": true,
+ "google.generativeai.protos.Permission.GranteeType.USER": true,
+ "google.generativeai.protos.Permission.GranteeType.__abs__": true,
+ "google.generativeai.protos.Permission.GranteeType.__add__": true,
+ "google.generativeai.protos.Permission.GranteeType.__and__": true,
+ "google.generativeai.protos.Permission.GranteeType.__bool__": true,
+ "google.generativeai.protos.Permission.GranteeType.__contains__": true,
+ "google.generativeai.protos.Permission.GranteeType.__eq__": true,
+ "google.generativeai.protos.Permission.GranteeType.__floordiv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ge__": true,
+ "google.generativeai.protos.Permission.GranteeType.__getitem__": true,
+ "google.generativeai.protos.Permission.GranteeType.__gt__": true,
+ "google.generativeai.protos.Permission.GranteeType.__init__": true,
+ "google.generativeai.protos.Permission.GranteeType.__invert__": true,
+ "google.generativeai.protos.Permission.GranteeType.__iter__": true,
+ "google.generativeai.protos.Permission.GranteeType.__le__": true,
+ "google.generativeai.protos.Permission.GranteeType.__len__": true,
+ "google.generativeai.protos.Permission.GranteeType.__lshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__lt__": true,
+ "google.generativeai.protos.Permission.GranteeType.__mod__": true,
+ "google.generativeai.protos.Permission.GranteeType.__mul__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ne__": true,
+ "google.generativeai.protos.Permission.GranteeType.__neg__": true,
+ "google.generativeai.protos.Permission.GranteeType.__new__": true,
+ "google.generativeai.protos.Permission.GranteeType.__or__": true,
+ "google.generativeai.protos.Permission.GranteeType.__pos__": true,
+ "google.generativeai.protos.Permission.GranteeType.__pow__": true,
+ "google.generativeai.protos.Permission.GranteeType.__radd__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rand__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rlshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rmod__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rmul__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ror__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rpow__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rrshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rsub__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rtruediv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rxor__": true,
+ "google.generativeai.protos.Permission.GranteeType.__sub__": true,
+ "google.generativeai.protos.Permission.GranteeType.__truediv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__xor__": true,
+ "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": true,
+ "google.generativeai.protos.Permission.GranteeType.bit_count": true,
+ "google.generativeai.protos.Permission.GranteeType.bit_length": true,
+ "google.generativeai.protos.Permission.GranteeType.conjugate": true,
+ "google.generativeai.protos.Permission.GranteeType.denominator": true,
+ "google.generativeai.protos.Permission.GranteeType.from_bytes": true,
+ "google.generativeai.protos.Permission.GranteeType.imag": true,
+ "google.generativeai.protos.Permission.GranteeType.numerator": true,
+ "google.generativeai.protos.Permission.GranteeType.real": true,
+ "google.generativeai.protos.Permission.GranteeType.to_bytes": true,
+ "google.generativeai.protos.Permission.Role": false,
+ "google.generativeai.protos.Permission.Role.OWNER": true,
+ "google.generativeai.protos.Permission.Role.READER": true,
+ "google.generativeai.protos.Permission.Role.ROLE_UNSPECIFIED": true,
+ "google.generativeai.protos.Permission.Role.WRITER": true,
+ "google.generativeai.protos.Permission.Role.__abs__": true,
+ "google.generativeai.protos.Permission.Role.__add__": true,
+ "google.generativeai.protos.Permission.Role.__and__": true,
+ "google.generativeai.protos.Permission.Role.__bool__": true,
+ "google.generativeai.protos.Permission.Role.__contains__": true,
+ "google.generativeai.protos.Permission.Role.__eq__": true,
+ "google.generativeai.protos.Permission.Role.__floordiv__": true,
+ "google.generativeai.protos.Permission.Role.__ge__": true,
+ "google.generativeai.protos.Permission.Role.__getitem__": true,
+ "google.generativeai.protos.Permission.Role.__gt__": true,
+ "google.generativeai.protos.Permission.Role.__init__": true,
+ "google.generativeai.protos.Permission.Role.__invert__": true,
+ "google.generativeai.protos.Permission.Role.__iter__": true,
+ "google.generativeai.protos.Permission.Role.__le__": true,
+ "google.generativeai.protos.Permission.Role.__len__": true,
+ "google.generativeai.protos.Permission.Role.__lshift__": true,
+ "google.generativeai.protos.Permission.Role.__lt__": true,
+ "google.generativeai.protos.Permission.Role.__mod__": true,
+ "google.generativeai.protos.Permission.Role.__mul__": true,
+ "google.generativeai.protos.Permission.Role.__ne__": true,
+ "google.generativeai.protos.Permission.Role.__neg__": true,
+ "google.generativeai.protos.Permission.Role.__new__": true,
+ "google.generativeai.protos.Permission.Role.__or__": true,
+ "google.generativeai.protos.Permission.Role.__pos__": true,
+ "google.generativeai.protos.Permission.Role.__pow__": true,
+ "google.generativeai.protos.Permission.Role.__radd__": true,
+ "google.generativeai.protos.Permission.Role.__rand__": true,
+ "google.generativeai.protos.Permission.Role.__rfloordiv__": true,
+ "google.generativeai.protos.Permission.Role.__rlshift__": true,
+ "google.generativeai.protos.Permission.Role.__rmod__": true,
+ "google.generativeai.protos.Permission.Role.__rmul__": true,
+ "google.generativeai.protos.Permission.Role.__ror__": true,
+ "google.generativeai.protos.Permission.Role.__rpow__": true,
+ "google.generativeai.protos.Permission.Role.__rrshift__": true,
+ "google.generativeai.protos.Permission.Role.__rshift__": true,
+ "google.generativeai.protos.Permission.Role.__rsub__": true,
+ "google.generativeai.protos.Permission.Role.__rtruediv__": true,
+ "google.generativeai.protos.Permission.Role.__rxor__": true,
+ "google.generativeai.protos.Permission.Role.__sub__": true,
+ "google.generativeai.protos.Permission.Role.__truediv__": true,
+ "google.generativeai.protos.Permission.Role.__xor__": true,
+ "google.generativeai.protos.Permission.Role.as_integer_ratio": true,
+ "google.generativeai.protos.Permission.Role.bit_count": true,
+ "google.generativeai.protos.Permission.Role.bit_length": true,
+ "google.generativeai.protos.Permission.Role.conjugate": true,
+ "google.generativeai.protos.Permission.Role.denominator": true,
+ "google.generativeai.protos.Permission.Role.from_bytes": true,
+ "google.generativeai.protos.Permission.Role.imag": true,
+ "google.generativeai.protos.Permission.Role.numerator": true,
+ "google.generativeai.protos.Permission.Role.real": true,
+ "google.generativeai.protos.Permission.Role.to_bytes": true,
+ "google.generativeai.protos.Permission.__call__": true,
+ "google.generativeai.protos.Permission.__eq__": true,
+ "google.generativeai.protos.Permission.__ge__": true,
+ "google.generativeai.protos.Permission.__gt__": true,
+ "google.generativeai.protos.Permission.__init__": true,
+ "google.generativeai.protos.Permission.__le__": true,
+ "google.generativeai.protos.Permission.__lt__": true,
+ "google.generativeai.protos.Permission.__ne__": true,
+ "google.generativeai.protos.Permission.__new__": true,
+ "google.generativeai.protos.Permission.__or__": true,
+ "google.generativeai.protos.Permission.__ror__": true,
+ "google.generativeai.protos.Permission.copy_from": true,
+ "google.generativeai.protos.Permission.deserialize": true,
+ "google.generativeai.protos.Permission.email_address": true,
+ "google.generativeai.protos.Permission.from_json": true,
+ "google.generativeai.protos.Permission.grantee_type": true,
+ "google.generativeai.protos.Permission.mro": true,
+ "google.generativeai.protos.Permission.name": true,
+ "google.generativeai.protos.Permission.pb": true,
+ "google.generativeai.protos.Permission.role": true,
+ "google.generativeai.protos.Permission.serialize": true,
+ "google.generativeai.protos.Permission.to_dict": true,
+ "google.generativeai.protos.Permission.to_json": true,
+ "google.generativeai.protos.Permission.wrap": true,
+ "google.generativeai.protos.QueryCorpusRequest": false,
+ "google.generativeai.protos.QueryCorpusRequest.__call__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__eq__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ge__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__gt__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__init__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__le__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__lt__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ne__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__new__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__or__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ror__": true,
+ "google.generativeai.protos.QueryCorpusRequest.copy_from": true,
+ "google.generativeai.protos.QueryCorpusRequest.deserialize": true,
+ "google.generativeai.protos.QueryCorpusRequest.from_json": true,
+ "google.generativeai.protos.QueryCorpusRequest.metadata_filters": true,
+ "google.generativeai.protos.QueryCorpusRequest.mro": true,
+ "google.generativeai.protos.QueryCorpusRequest.name": true,
+ "google.generativeai.protos.QueryCorpusRequest.pb": true,
+ "google.generativeai.protos.QueryCorpusRequest.query": true,
+ "google.generativeai.protos.QueryCorpusRequest.results_count": true,
+ "google.generativeai.protos.QueryCorpusRequest.serialize": true,
+ "google.generativeai.protos.QueryCorpusRequest.to_dict": true,
+ "google.generativeai.protos.QueryCorpusRequest.to_json": true,
+ "google.generativeai.protos.QueryCorpusRequest.wrap": true,
+ "google.generativeai.protos.QueryCorpusResponse": false,
+ "google.generativeai.protos.QueryCorpusResponse.__call__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__eq__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ge__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__gt__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__init__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__le__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__lt__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ne__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__new__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__or__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ror__": true,
+ "google.generativeai.protos.QueryCorpusResponse.copy_from": true,
+ "google.generativeai.protos.QueryCorpusResponse.deserialize": true,
+ "google.generativeai.protos.QueryCorpusResponse.from_json": true,
+ "google.generativeai.protos.QueryCorpusResponse.mro": true,
+ "google.generativeai.protos.QueryCorpusResponse.pb": true,
+ "google.generativeai.protos.QueryCorpusResponse.relevant_chunks": true,
+ "google.generativeai.protos.QueryCorpusResponse.serialize": true,
+ "google.generativeai.protos.QueryCorpusResponse.to_dict": true,
+ "google.generativeai.protos.QueryCorpusResponse.to_json": true,
+ "google.generativeai.protos.QueryCorpusResponse.wrap": true,
+ "google.generativeai.protos.QueryDocumentRequest": false,
+ "google.generativeai.protos.QueryDocumentRequest.__call__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__eq__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ge__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__gt__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__init__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__le__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__lt__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ne__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__new__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__or__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ror__": true,
+ "google.generativeai.protos.QueryDocumentRequest.copy_from": true,
+ "google.generativeai.protos.QueryDocumentRequest.deserialize": true,
+ "google.generativeai.protos.QueryDocumentRequest.from_json": true,
+ "google.generativeai.protos.QueryDocumentRequest.metadata_filters": true,
+ "google.generativeai.protos.QueryDocumentRequest.mro": true,
+ "google.generativeai.protos.QueryDocumentRequest.name": true,
+ "google.generativeai.protos.QueryDocumentRequest.pb": true,
+ "google.generativeai.protos.QueryDocumentRequest.query": true,
+ "google.generativeai.protos.QueryDocumentRequest.results_count": true,
+ "google.generativeai.protos.QueryDocumentRequest.serialize": true,
+ "google.generativeai.protos.QueryDocumentRequest.to_dict": true,
+ "google.generativeai.protos.QueryDocumentRequest.to_json": true,
+ "google.generativeai.protos.QueryDocumentRequest.wrap": true,
+ "google.generativeai.protos.QueryDocumentResponse": false,
+ "google.generativeai.protos.QueryDocumentResponse.__call__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__eq__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ge__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__gt__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__init__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__le__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__lt__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ne__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__new__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__or__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ror__": true,
+ "google.generativeai.protos.QueryDocumentResponse.copy_from": true,
+ "google.generativeai.protos.QueryDocumentResponse.deserialize": true,
+ "google.generativeai.protos.QueryDocumentResponse.from_json": true,
+ "google.generativeai.protos.QueryDocumentResponse.mro": true,
+ "google.generativeai.protos.QueryDocumentResponse.pb": true,
+ "google.generativeai.protos.QueryDocumentResponse.relevant_chunks": true,
+ "google.generativeai.protos.QueryDocumentResponse.serialize": true,
+ "google.generativeai.protos.QueryDocumentResponse.to_dict": true,
+ "google.generativeai.protos.QueryDocumentResponse.to_json": true,
+ "google.generativeai.protos.QueryDocumentResponse.wrap": true,
+ "google.generativeai.protos.RelevantChunk": false,
+ "google.generativeai.protos.RelevantChunk.__call__": true,
+ "google.generativeai.protos.RelevantChunk.__eq__": true,
+ "google.generativeai.protos.RelevantChunk.__ge__": true,
+ "google.generativeai.protos.RelevantChunk.__gt__": true,
+ "google.generativeai.protos.RelevantChunk.__init__": true,
+ "google.generativeai.protos.RelevantChunk.__le__": true,
+ "google.generativeai.protos.RelevantChunk.__lt__": true,
+ "google.generativeai.protos.RelevantChunk.__ne__": true,
+ "google.generativeai.protos.RelevantChunk.__new__": true,
+ "google.generativeai.protos.RelevantChunk.__or__": true,
+ "google.generativeai.protos.RelevantChunk.__ror__": true,
+ "google.generativeai.protos.RelevantChunk.chunk": true,
+ "google.generativeai.protos.RelevantChunk.chunk_relevance_score": true,
+ "google.generativeai.protos.RelevantChunk.copy_from": true,
+ "google.generativeai.protos.RelevantChunk.deserialize": true,
+ "google.generativeai.protos.RelevantChunk.from_json": true,
+ "google.generativeai.protos.RelevantChunk.mro": true,
+ "google.generativeai.protos.RelevantChunk.pb": true,
+ "google.generativeai.protos.RelevantChunk.serialize": true,
+ "google.generativeai.protos.RelevantChunk.to_dict": true,
+ "google.generativeai.protos.RelevantChunk.to_json": true,
+ "google.generativeai.protos.RelevantChunk.wrap": true,
+ "google.generativeai.protos.SafetyFeedback": false,
+ "google.generativeai.protos.SafetyFeedback.__call__": true,
+ "google.generativeai.protos.SafetyFeedback.__eq__": true,
+ "google.generativeai.protos.SafetyFeedback.__ge__": true,
+ "google.generativeai.protos.SafetyFeedback.__gt__": true,
+ "google.generativeai.protos.SafetyFeedback.__init__": true,
+ "google.generativeai.protos.SafetyFeedback.__le__": true,
+ "google.generativeai.protos.SafetyFeedback.__lt__": true,
+ "google.generativeai.protos.SafetyFeedback.__ne__": true,
+ "google.generativeai.protos.SafetyFeedback.__new__": true,
+ "google.generativeai.protos.SafetyFeedback.__or__": true,
+ "google.generativeai.protos.SafetyFeedback.__ror__": true,
+ "google.generativeai.protos.SafetyFeedback.copy_from": true,
+ "google.generativeai.protos.SafetyFeedback.deserialize": true,
+ "google.generativeai.protos.SafetyFeedback.from_json": true,
+ "google.generativeai.protos.SafetyFeedback.mro": true,
+ "google.generativeai.protos.SafetyFeedback.pb": true,
+ "google.generativeai.protos.SafetyFeedback.rating": true,
+ "google.generativeai.protos.SafetyFeedback.serialize": true,
+ "google.generativeai.protos.SafetyFeedback.setting": true,
+ "google.generativeai.protos.SafetyFeedback.to_dict": true,
+ "google.generativeai.protos.SafetyFeedback.to_json": true,
+ "google.generativeai.protos.SafetyFeedback.wrap": true,
+ "google.generativeai.protos.SafetyRating": false,
+ "google.generativeai.protos.SafetyRating.HarmProbability": false,
+ "google.generativeai.protos.SafetyRating.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.HIGH": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.LOW": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.MEDIUM": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.NEGLIGIBLE": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__add__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__and__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__contains__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__init__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__iter__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__le__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__len__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__new__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__or__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.denominator": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.imag": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.numerator": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.real": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": true,
+ "google.generativeai.protos.SafetyRating.__call__": true,
+ "google.generativeai.protos.SafetyRating.__eq__": true,
+ "google.generativeai.protos.SafetyRating.__ge__": true,
+ "google.generativeai.protos.SafetyRating.__gt__": true,
+ "google.generativeai.protos.SafetyRating.__init__": true,
+ "google.generativeai.protos.SafetyRating.__le__": true,
+ "google.generativeai.protos.SafetyRating.__lt__": true,
+ "google.generativeai.protos.SafetyRating.__ne__": true,
+ "google.generativeai.protos.SafetyRating.__new__": true,
+ "google.generativeai.protos.SafetyRating.__or__": true,
+ "google.generativeai.protos.SafetyRating.__ror__": true,
+ "google.generativeai.protos.SafetyRating.blocked": true,
+ "google.generativeai.protos.SafetyRating.category": true,
+ "google.generativeai.protos.SafetyRating.copy_from": true,
+ "google.generativeai.protos.SafetyRating.deserialize": true,
+ "google.generativeai.protos.SafetyRating.from_json": true,
+ "google.generativeai.protos.SafetyRating.mro": true,
+ "google.generativeai.protos.SafetyRating.pb": true,
+ "google.generativeai.protos.SafetyRating.probability": true,
+ "google.generativeai.protos.SafetyRating.serialize": true,
+ "google.generativeai.protos.SafetyRating.to_dict": true,
+ "google.generativeai.protos.SafetyRating.to_json": true,
+ "google.generativeai.protos.SafetyRating.wrap": true,
+ "google.generativeai.protos.SafetySetting": false,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold": false,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_NONE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": true,
+ "google.generativeai.protos.SafetySetting.__call__": true,
+ "google.generativeai.protos.SafetySetting.__eq__": true,
+ "google.generativeai.protos.SafetySetting.__ge__": true,
+ "google.generativeai.protos.SafetySetting.__gt__": true,
+ "google.generativeai.protos.SafetySetting.__init__": true,
+ "google.generativeai.protos.SafetySetting.__le__": true,
+ "google.generativeai.protos.SafetySetting.__lt__": true,
+ "google.generativeai.protos.SafetySetting.__ne__": true,
+ "google.generativeai.protos.SafetySetting.__new__": true,
+ "google.generativeai.protos.SafetySetting.__or__": true,
+ "google.generativeai.protos.SafetySetting.__ror__": true,
+ "google.generativeai.protos.SafetySetting.category": true,
+ "google.generativeai.protos.SafetySetting.copy_from": true,
+ "google.generativeai.protos.SafetySetting.deserialize": true,
+ "google.generativeai.protos.SafetySetting.from_json": true,
+ "google.generativeai.protos.SafetySetting.mro": true,
+ "google.generativeai.protos.SafetySetting.pb": true,
+ "google.generativeai.protos.SafetySetting.serialize": true,
+ "google.generativeai.protos.SafetySetting.threshold": true,
+ "google.generativeai.protos.SafetySetting.to_dict": true,
+ "google.generativeai.protos.SafetySetting.to_json": true,
+ "google.generativeai.protos.SafetySetting.wrap": true,
+ "google.generativeai.protos.Schema": false,
+ "google.generativeai.protos.Schema.PropertiesEntry": false,
+ "google.generativeai.protos.Schema.PropertiesEntry.__call__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__eq__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ge__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__gt__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__init__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__le__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__lt__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ne__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__new__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__or__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ror__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.copy_from": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.deserialize": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.from_json": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.key": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.mro": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.pb": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.serialize": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.to_dict": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.to_json": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.value": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.wrap": true,
+ "google.generativeai.protos.Schema.__call__": true,
+ "google.generativeai.protos.Schema.__eq__": true,
+ "google.generativeai.protos.Schema.__ge__": true,
+ "google.generativeai.protos.Schema.__gt__": true,
+ "google.generativeai.protos.Schema.__init__": true,
+ "google.generativeai.protos.Schema.__le__": true,
+ "google.generativeai.protos.Schema.__lt__": true,
+ "google.generativeai.protos.Schema.__ne__": true,
+ "google.generativeai.protos.Schema.__new__": true,
+ "google.generativeai.protos.Schema.__or__": true,
+ "google.generativeai.protos.Schema.__ror__": true,
+ "google.generativeai.protos.Schema.copy_from": true,
+ "google.generativeai.protos.Schema.description": true,
+ "google.generativeai.protos.Schema.deserialize": true,
+ "google.generativeai.protos.Schema.enum": true,
+ "google.generativeai.protos.Schema.format_": true,
+ "google.generativeai.protos.Schema.from_json": true,
+ "google.generativeai.protos.Schema.items": true,
+ "google.generativeai.protos.Schema.mro": true,
+ "google.generativeai.protos.Schema.nullable": true,
+ "google.generativeai.protos.Schema.pb": true,
+ "google.generativeai.protos.Schema.properties": true,
+ "google.generativeai.protos.Schema.required": true,
+ "google.generativeai.protos.Schema.serialize": true,
+ "google.generativeai.protos.Schema.to_dict": true,
+ "google.generativeai.protos.Schema.to_json": true,
+ "google.generativeai.protos.Schema.type_": true,
+ "google.generativeai.protos.Schema.wrap": true,
+ "google.generativeai.protos.SemanticRetrieverConfig": false,
+ "google.generativeai.protos.SemanticRetrieverConfig.__call__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__eq__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ge__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__gt__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__init__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__le__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__lt__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ne__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__new__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__or__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ror__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.copy_from": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.deserialize": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.from_json": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.max_chunks_count": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.metadata_filters": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.minimum_relevance_score": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.mro": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.pb": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.query": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.serialize": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.source": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.to_dict": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.to_json": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.wrap": true,
+ "google.generativeai.protos.StringList": false,
+ "google.generativeai.protos.StringList.__call__": true,
+ "google.generativeai.protos.StringList.__eq__": true,
+ "google.generativeai.protos.StringList.__ge__": true,
+ "google.generativeai.protos.StringList.__gt__": true,
+ "google.generativeai.protos.StringList.__init__": true,
+ "google.generativeai.protos.StringList.__le__": true,
+ "google.generativeai.protos.StringList.__lt__": true,
+ "google.generativeai.protos.StringList.__ne__": true,
+ "google.generativeai.protos.StringList.__new__": true,
+ "google.generativeai.protos.StringList.__or__": true,
+ "google.generativeai.protos.StringList.__ror__": true,
+ "google.generativeai.protos.StringList.copy_from": true,
+ "google.generativeai.protos.StringList.deserialize": true,
+ "google.generativeai.protos.StringList.from_json": true,
+ "google.generativeai.protos.StringList.mro": true,
+ "google.generativeai.protos.StringList.pb": true,
+ "google.generativeai.protos.StringList.serialize": true,
+ "google.generativeai.protos.StringList.to_dict": true,
+ "google.generativeai.protos.StringList.to_json": true,
+ "google.generativeai.protos.StringList.values": true,
+ "google.generativeai.protos.StringList.wrap": true,
+ "google.generativeai.protos.TaskType": false,
+ "google.generativeai.protos.TaskType.CLASSIFICATION": true,
+ "google.generativeai.protos.TaskType.CLUSTERING": true,
+ "google.generativeai.protos.TaskType.FACT_VERIFICATION": true,
+ "google.generativeai.protos.TaskType.QUESTION_ANSWERING": true,
+ "google.generativeai.protos.TaskType.RETRIEVAL_DOCUMENT": true,
+ "google.generativeai.protos.TaskType.RETRIEVAL_QUERY": true,
+ "google.generativeai.protos.TaskType.SEMANTIC_SIMILARITY": true,
+ "google.generativeai.protos.TaskType.TASK_TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.TaskType.__abs__": true,
+ "google.generativeai.protos.TaskType.__add__": true,
+ "google.generativeai.protos.TaskType.__and__": true,
+ "google.generativeai.protos.TaskType.__bool__": true,
+ "google.generativeai.protos.TaskType.__contains__": true,
+ "google.generativeai.protos.TaskType.__eq__": true,
+ "google.generativeai.protos.TaskType.__floordiv__": true,
+ "google.generativeai.protos.TaskType.__ge__": true,
+ "google.generativeai.protos.TaskType.__getitem__": true,
+ "google.generativeai.protos.TaskType.__gt__": true,
+ "google.generativeai.protos.TaskType.__init__": true,
+ "google.generativeai.protos.TaskType.__invert__": true,
+ "google.generativeai.protos.TaskType.__iter__": true,
+ "google.generativeai.protos.TaskType.__le__": true,
+ "google.generativeai.protos.TaskType.__len__": true,
+ "google.generativeai.protos.TaskType.__lshift__": true,
+ "google.generativeai.protos.TaskType.__lt__": true,
+ "google.generativeai.protos.TaskType.__mod__": true,
+ "google.generativeai.protos.TaskType.__mul__": true,
+ "google.generativeai.protos.TaskType.__ne__": true,
+ "google.generativeai.protos.TaskType.__neg__": true,
+ "google.generativeai.protos.TaskType.__new__": true,
+ "google.generativeai.protos.TaskType.__or__": true,
+ "google.generativeai.protos.TaskType.__pos__": true,
+ "google.generativeai.protos.TaskType.__pow__": true,
+ "google.generativeai.protos.TaskType.__radd__": true,
+ "google.generativeai.protos.TaskType.__rand__": true,
+ "google.generativeai.protos.TaskType.__rfloordiv__": true,
+ "google.generativeai.protos.TaskType.__rlshift__": true,
+ "google.generativeai.protos.TaskType.__rmod__": true,
+ "google.generativeai.protos.TaskType.__rmul__": true,
+ "google.generativeai.protos.TaskType.__ror__": true,
+ "google.generativeai.protos.TaskType.__rpow__": true,
+ "google.generativeai.protos.TaskType.__rrshift__": true,
+ "google.generativeai.protos.TaskType.__rshift__": true,
+ "google.generativeai.protos.TaskType.__rsub__": true,
+ "google.generativeai.protos.TaskType.__rtruediv__": true,
+ "google.generativeai.protos.TaskType.__rxor__": true,
+ "google.generativeai.protos.TaskType.__sub__": true,
+ "google.generativeai.protos.TaskType.__truediv__": true,
+ "google.generativeai.protos.TaskType.__xor__": true,
+ "google.generativeai.protos.TaskType.as_integer_ratio": true,
+ "google.generativeai.protos.TaskType.bit_count": true,
+ "google.generativeai.protos.TaskType.bit_length": true,
+ "google.generativeai.protos.TaskType.conjugate": true,
+ "google.generativeai.protos.TaskType.denominator": true,
+ "google.generativeai.protos.TaskType.from_bytes": true,
+ "google.generativeai.protos.TaskType.imag": true,
+ "google.generativeai.protos.TaskType.numerator": true,
+ "google.generativeai.protos.TaskType.real": true,
+ "google.generativeai.protos.TaskType.to_bytes": true,
+ "google.generativeai.protos.TextCompletion": false,
+ "google.generativeai.protos.TextCompletion.__call__": true,
+ "google.generativeai.protos.TextCompletion.__eq__": true,
+ "google.generativeai.protos.TextCompletion.__ge__": true,
+ "google.generativeai.protos.TextCompletion.__gt__": true,
+ "google.generativeai.protos.TextCompletion.__init__": true,
+ "google.generativeai.protos.TextCompletion.__le__": true,
+ "google.generativeai.protos.TextCompletion.__lt__": true,
+ "google.generativeai.protos.TextCompletion.__ne__": true,
+ "google.generativeai.protos.TextCompletion.__new__": true,
+ "google.generativeai.protos.TextCompletion.__or__": true,
+ "google.generativeai.protos.TextCompletion.__ror__": true,
+ "google.generativeai.protos.TextCompletion.citation_metadata": true,
+ "google.generativeai.protos.TextCompletion.copy_from": true,
+ "google.generativeai.protos.TextCompletion.deserialize": true,
+ "google.generativeai.protos.TextCompletion.from_json": true,
+ "google.generativeai.protos.TextCompletion.mro": true,
+ "google.generativeai.protos.TextCompletion.output": true,
+ "google.generativeai.protos.TextCompletion.pb": true,
+ "google.generativeai.protos.TextCompletion.safety_ratings": true,
+ "google.generativeai.protos.TextCompletion.serialize": true,
+ "google.generativeai.protos.TextCompletion.to_dict": true,
+ "google.generativeai.protos.TextCompletion.to_json": true,
+ "google.generativeai.protos.TextCompletion.wrap": true,
+ "google.generativeai.protos.TextPrompt": false,
+ "google.generativeai.protos.TextPrompt.__call__": true,
+ "google.generativeai.protos.TextPrompt.__eq__": true,
+ "google.generativeai.protos.TextPrompt.__ge__": true,
+ "google.generativeai.protos.TextPrompt.__gt__": true,
+ "google.generativeai.protos.TextPrompt.__init__": true,
+ "google.generativeai.protos.TextPrompt.__le__": true,
+ "google.generativeai.protos.TextPrompt.__lt__": true,
+ "google.generativeai.protos.TextPrompt.__ne__": true,
+ "google.generativeai.protos.TextPrompt.__new__": true,
+ "google.generativeai.protos.TextPrompt.__or__": true,
+ "google.generativeai.protos.TextPrompt.__ror__": true,
+ "google.generativeai.protos.TextPrompt.copy_from": true,
+ "google.generativeai.protos.TextPrompt.deserialize": true,
+ "google.generativeai.protos.TextPrompt.from_json": true,
+ "google.generativeai.protos.TextPrompt.mro": true,
+ "google.generativeai.protos.TextPrompt.pb": true,
+ "google.generativeai.protos.TextPrompt.serialize": true,
+ "google.generativeai.protos.TextPrompt.text": true,
+ "google.generativeai.protos.TextPrompt.to_dict": true,
+ "google.generativeai.protos.TextPrompt.to_json": true,
+ "google.generativeai.protos.TextPrompt.wrap": true,
+ "google.generativeai.protos.Tool": false,
+ "google.generativeai.protos.Tool.__call__": true,
+ "google.generativeai.protos.Tool.__eq__": true,
+ "google.generativeai.protos.Tool.__ge__": true,
+ "google.generativeai.protos.Tool.__gt__": true,
+ "google.generativeai.protos.Tool.__init__": true,
+ "google.generativeai.protos.Tool.__le__": true,
+ "google.generativeai.protos.Tool.__lt__": true,
+ "google.generativeai.protos.Tool.__ne__": true,
+ "google.generativeai.protos.Tool.__new__": true,
+ "google.generativeai.protos.Tool.__or__": true,
+ "google.generativeai.protos.Tool.__ror__": true,
+ "google.generativeai.protos.Tool.code_execution": true,
+ "google.generativeai.protos.Tool.copy_from": true,
+ "google.generativeai.protos.Tool.deserialize": true,
+ "google.generativeai.protos.Tool.from_json": true,
+ "google.generativeai.protos.Tool.function_declarations": true,
+ "google.generativeai.protos.Tool.mro": true,
+ "google.generativeai.protos.Tool.pb": true,
+ "google.generativeai.protos.Tool.serialize": true,
+ "google.generativeai.protos.Tool.to_dict": true,
+ "google.generativeai.protos.Tool.to_json": true,
+ "google.generativeai.protos.Tool.wrap": true,
+ "google.generativeai.protos.ToolConfig": false,
+ "google.generativeai.protos.ToolConfig.__call__": true,
+ "google.generativeai.protos.ToolConfig.__eq__": true,
+ "google.generativeai.protos.ToolConfig.__ge__": true,
+ "google.generativeai.protos.ToolConfig.__gt__": true,
+ "google.generativeai.protos.ToolConfig.__init__": true,
+ "google.generativeai.protos.ToolConfig.__le__": true,
+ "google.generativeai.protos.ToolConfig.__lt__": true,
+ "google.generativeai.protos.ToolConfig.__ne__": true,
+ "google.generativeai.protos.ToolConfig.__new__": true,
+ "google.generativeai.protos.ToolConfig.__or__": true,
+ "google.generativeai.protos.ToolConfig.__ror__": true,
+ "google.generativeai.protos.ToolConfig.copy_from": true,
+ "google.generativeai.protos.ToolConfig.deserialize": true,
+ "google.generativeai.protos.ToolConfig.from_json": true,
+ "google.generativeai.protos.ToolConfig.function_calling_config": true,
+ "google.generativeai.protos.ToolConfig.mro": true,
+ "google.generativeai.protos.ToolConfig.pb": true,
+ "google.generativeai.protos.ToolConfig.serialize": true,
+ "google.generativeai.protos.ToolConfig.to_dict": true,
+ "google.generativeai.protos.ToolConfig.to_json": true,
+ "google.generativeai.protos.ToolConfig.wrap": true,
+ "google.generativeai.protos.TransferOwnershipRequest": false,
+ "google.generativeai.protos.TransferOwnershipRequest.__call__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__eq__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ge__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__gt__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__init__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__le__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__lt__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ne__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__new__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__or__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ror__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.copy_from": true,
+ "google.generativeai.protos.TransferOwnershipRequest.deserialize": true,
+ "google.generativeai.protos.TransferOwnershipRequest.email_address": true,
+ "google.generativeai.protos.TransferOwnershipRequest.from_json": true,
+ "google.generativeai.protos.TransferOwnershipRequest.mro": true,
+ "google.generativeai.protos.TransferOwnershipRequest.name": true,
+ "google.generativeai.protos.TransferOwnershipRequest.pb": true,
+ "google.generativeai.protos.TransferOwnershipRequest.serialize": true,
+ "google.generativeai.protos.TransferOwnershipRequest.to_dict": true,
+ "google.generativeai.protos.TransferOwnershipRequest.to_json": true,
+ "google.generativeai.protos.TransferOwnershipRequest.wrap": true,
+ "google.generativeai.protos.TransferOwnershipResponse": false,
+ "google.generativeai.protos.TransferOwnershipResponse.__call__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__eq__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ge__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__gt__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__init__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__le__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__lt__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ne__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__new__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__or__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ror__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.copy_from": true,
+ "google.generativeai.protos.TransferOwnershipResponse.deserialize": true,
+ "google.generativeai.protos.TransferOwnershipResponse.from_json": true,
+ "google.generativeai.protos.TransferOwnershipResponse.mro": true,
+ "google.generativeai.protos.TransferOwnershipResponse.pb": true,
+ "google.generativeai.protos.TransferOwnershipResponse.serialize": true,
+ "google.generativeai.protos.TransferOwnershipResponse.to_dict": true,
+ "google.generativeai.protos.TransferOwnershipResponse.to_json": true,
+ "google.generativeai.protos.TransferOwnershipResponse.wrap": true,
+ "google.generativeai.protos.TunedModel": false,
+ "google.generativeai.protos.TunedModel.State": false,
+ "google.generativeai.protos.TunedModel.State.ACTIVE": true,
+ "google.generativeai.protos.TunedModel.State.CREATING": true,
+ "google.generativeai.protos.TunedModel.State.FAILED": true,
+ "google.generativeai.protos.TunedModel.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.TunedModel.State.__abs__": true,
+ "google.generativeai.protos.TunedModel.State.__add__": true,
+ "google.generativeai.protos.TunedModel.State.__and__": true,
+ "google.generativeai.protos.TunedModel.State.__bool__": true,
+ "google.generativeai.protos.TunedModel.State.__contains__": true,
+ "google.generativeai.protos.TunedModel.State.__eq__": true,
+ "google.generativeai.protos.TunedModel.State.__floordiv__": true,
+ "google.generativeai.protos.TunedModel.State.__ge__": true,
+ "google.generativeai.protos.TunedModel.State.__getitem__": true,
+ "google.generativeai.protos.TunedModel.State.__gt__": true,
+ "google.generativeai.protos.TunedModel.State.__init__": true,
+ "google.generativeai.protos.TunedModel.State.__invert__": true,
+ "google.generativeai.protos.TunedModel.State.__iter__": true,
+ "google.generativeai.protos.TunedModel.State.__le__": true,
+ "google.generativeai.protos.TunedModel.State.__len__": true,
+ "google.generativeai.protos.TunedModel.State.__lshift__": true,
+ "google.generativeai.protos.TunedModel.State.__lt__": true,
+ "google.generativeai.protos.TunedModel.State.__mod__": true,
+ "google.generativeai.protos.TunedModel.State.__mul__": true,
+ "google.generativeai.protos.TunedModel.State.__ne__": true,
+ "google.generativeai.protos.TunedModel.State.__neg__": true,
+ "google.generativeai.protos.TunedModel.State.__new__": true,
+ "google.generativeai.protos.TunedModel.State.__or__": true,
+ "google.generativeai.protos.TunedModel.State.__pos__": true,
+ "google.generativeai.protos.TunedModel.State.__pow__": true,
+ "google.generativeai.protos.TunedModel.State.__radd__": true,
+ "google.generativeai.protos.TunedModel.State.__rand__": true,
+ "google.generativeai.protos.TunedModel.State.__rfloordiv__": true,
+ "google.generativeai.protos.TunedModel.State.__rlshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rmod__": true,
+ "google.generativeai.protos.TunedModel.State.__rmul__": true,
+ "google.generativeai.protos.TunedModel.State.__ror__": true,
+ "google.generativeai.protos.TunedModel.State.__rpow__": true,
+ "google.generativeai.protos.TunedModel.State.__rrshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rsub__": true,
+ "google.generativeai.protos.TunedModel.State.__rtruediv__": true,
+ "google.generativeai.protos.TunedModel.State.__rxor__": true,
+ "google.generativeai.protos.TunedModel.State.__sub__": true,
+ "google.generativeai.protos.TunedModel.State.__truediv__": true,
+ "google.generativeai.protos.TunedModel.State.__xor__": true,
+ "google.generativeai.protos.TunedModel.State.as_integer_ratio": true,
+ "google.generativeai.protos.TunedModel.State.bit_count": true,
+ "google.generativeai.protos.TunedModel.State.bit_length": true,
+ "google.generativeai.protos.TunedModel.State.conjugate": true,
+ "google.generativeai.protos.TunedModel.State.denominator": true,
+ "google.generativeai.protos.TunedModel.State.from_bytes": true,
+ "google.generativeai.protos.TunedModel.State.imag": true,
+ "google.generativeai.protos.TunedModel.State.numerator": true,
+ "google.generativeai.protos.TunedModel.State.real": true,
+ "google.generativeai.protos.TunedModel.State.to_bytes": true,
+ "google.generativeai.protos.TunedModel.__call__": true,
+ "google.generativeai.protos.TunedModel.__eq__": true,
+ "google.generativeai.protos.TunedModel.__ge__": true,
+ "google.generativeai.protos.TunedModel.__gt__": true,
+ "google.generativeai.protos.TunedModel.__init__": true,
+ "google.generativeai.protos.TunedModel.__le__": true,
+ "google.generativeai.protos.TunedModel.__lt__": true,
+ "google.generativeai.protos.TunedModel.__ne__": true,
+ "google.generativeai.protos.TunedModel.__new__": true,
+ "google.generativeai.protos.TunedModel.__or__": true,
+ "google.generativeai.protos.TunedModel.__ror__": true,
+ "google.generativeai.protos.TunedModel.base_model": true,
+ "google.generativeai.protos.TunedModel.copy_from": true,
+ "google.generativeai.protos.TunedModel.create_time": true,
+ "google.generativeai.protos.TunedModel.description": true,
+ "google.generativeai.protos.TunedModel.deserialize": true,
+ "google.generativeai.protos.TunedModel.display_name": true,
+ "google.generativeai.protos.TunedModel.from_json": true,
+ "google.generativeai.protos.TunedModel.mro": true,
+ "google.generativeai.protos.TunedModel.name": true,
+ "google.generativeai.protos.TunedModel.pb": true,
+ "google.generativeai.protos.TunedModel.serialize": true,
+ "google.generativeai.protos.TunedModel.state": true,
+ "google.generativeai.protos.TunedModel.temperature": true,
+ "google.generativeai.protos.TunedModel.to_dict": true,
+ "google.generativeai.protos.TunedModel.to_json": true,
+ "google.generativeai.protos.TunedModel.top_k": true,
+ "google.generativeai.protos.TunedModel.top_p": true,
+ "google.generativeai.protos.TunedModel.tuned_model_source": true,
+ "google.generativeai.protos.TunedModel.tuning_task": true,
+ "google.generativeai.protos.TunedModel.update_time": true,
+ "google.generativeai.protos.TunedModel.wrap": true,
+ "google.generativeai.protos.TunedModelSource": false,
+ "google.generativeai.protos.TunedModelSource.__call__": true,
+ "google.generativeai.protos.TunedModelSource.__eq__": true,
+ "google.generativeai.protos.TunedModelSource.__ge__": true,
+ "google.generativeai.protos.TunedModelSource.__gt__": true,
+ "google.generativeai.protos.TunedModelSource.__init__": true,
+ "google.generativeai.protos.TunedModelSource.__le__": true,
+ "google.generativeai.protos.TunedModelSource.__lt__": true,
+ "google.generativeai.protos.TunedModelSource.__ne__": true,
+ "google.generativeai.protos.TunedModelSource.__new__": true,
+ "google.generativeai.protos.TunedModelSource.__or__": true,
+ "google.generativeai.protos.TunedModelSource.__ror__": true,
+ "google.generativeai.protos.TunedModelSource.base_model": true,
+ "google.generativeai.protos.TunedModelSource.copy_from": true,
+ "google.generativeai.protos.TunedModelSource.deserialize": true,
+ "google.generativeai.protos.TunedModelSource.from_json": true,
+ "google.generativeai.protos.TunedModelSource.mro": true,
+ "google.generativeai.protos.TunedModelSource.pb": true,
+ "google.generativeai.protos.TunedModelSource.serialize": true,
+ "google.generativeai.protos.TunedModelSource.to_dict": true,
+ "google.generativeai.protos.TunedModelSource.to_json": true,
+ "google.generativeai.protos.TunedModelSource.tuned_model": true,
+ "google.generativeai.protos.TunedModelSource.wrap": true,
+ "google.generativeai.protos.TuningExample": false,
+ "google.generativeai.protos.TuningExample.__call__": true,
+ "google.generativeai.protos.TuningExample.__eq__": true,
+ "google.generativeai.protos.TuningExample.__ge__": true,
+ "google.generativeai.protos.TuningExample.__gt__": true,
+ "google.generativeai.protos.TuningExample.__init__": true,
+ "google.generativeai.protos.TuningExample.__le__": true,
+ "google.generativeai.protos.TuningExample.__lt__": true,
+ "google.generativeai.protos.TuningExample.__ne__": true,
+ "google.generativeai.protos.TuningExample.__new__": true,
+ "google.generativeai.protos.TuningExample.__or__": true,
+ "google.generativeai.protos.TuningExample.__ror__": true,
+ "google.generativeai.protos.TuningExample.copy_from": true,
+ "google.generativeai.protos.TuningExample.deserialize": true,
+ "google.generativeai.protos.TuningExample.from_json": true,
+ "google.generativeai.protos.TuningExample.mro": true,
+ "google.generativeai.protos.TuningExample.output": true,
+ "google.generativeai.protos.TuningExample.pb": true,
+ "google.generativeai.protos.TuningExample.serialize": true,
+ "google.generativeai.protos.TuningExample.text_input": true,
+ "google.generativeai.protos.TuningExample.to_dict": true,
+ "google.generativeai.protos.TuningExample.to_json": true,
+ "google.generativeai.protos.TuningExample.wrap": true,
+ "google.generativeai.protos.TuningExamples": false,
+ "google.generativeai.protos.TuningExamples.__call__": true,
+ "google.generativeai.protos.TuningExamples.__eq__": true,
+ "google.generativeai.protos.TuningExamples.__ge__": true,
+ "google.generativeai.protos.TuningExamples.__gt__": true,
+ "google.generativeai.protos.TuningExamples.__init__": true,
+ "google.generativeai.protos.TuningExamples.__le__": true,
+ "google.generativeai.protos.TuningExamples.__lt__": true,
+ "google.generativeai.protos.TuningExamples.__ne__": true,
+ "google.generativeai.protos.TuningExamples.__new__": true,
+ "google.generativeai.protos.TuningExamples.__or__": true,
+ "google.generativeai.protos.TuningExamples.__ror__": true,
+ "google.generativeai.protos.TuningExamples.copy_from": true,
+ "google.generativeai.protos.TuningExamples.deserialize": true,
+ "google.generativeai.protos.TuningExamples.examples": true,
+ "google.generativeai.protos.TuningExamples.from_json": true,
+ "google.generativeai.protos.TuningExamples.mro": true,
+ "google.generativeai.protos.TuningExamples.pb": true,
+ "google.generativeai.protos.TuningExamples.serialize": true,
+ "google.generativeai.protos.TuningExamples.to_dict": true,
+ "google.generativeai.protos.TuningExamples.to_json": true,
+ "google.generativeai.protos.TuningExamples.wrap": true,
+ "google.generativeai.protos.TuningSnapshot": false,
+ "google.generativeai.protos.TuningSnapshot.__call__": true,
+ "google.generativeai.protos.TuningSnapshot.__eq__": true,
+ "google.generativeai.protos.TuningSnapshot.__ge__": true,
+ "google.generativeai.protos.TuningSnapshot.__gt__": true,
+ "google.generativeai.protos.TuningSnapshot.__init__": true,
+ "google.generativeai.protos.TuningSnapshot.__le__": true,
+ "google.generativeai.protos.TuningSnapshot.__lt__": true,
+ "google.generativeai.protos.TuningSnapshot.__ne__": true,
+ "google.generativeai.protos.TuningSnapshot.__new__": true,
+ "google.generativeai.protos.TuningSnapshot.__or__": true,
+ "google.generativeai.protos.TuningSnapshot.__ror__": true,
+ "google.generativeai.protos.TuningSnapshot.compute_time": true,
+ "google.generativeai.protos.TuningSnapshot.copy_from": true,
+ "google.generativeai.protos.TuningSnapshot.deserialize": true,
+ "google.generativeai.protos.TuningSnapshot.epoch": true,
+ "google.generativeai.protos.TuningSnapshot.from_json": true,
+ "google.generativeai.protos.TuningSnapshot.mean_loss": true,
+ "google.generativeai.protos.TuningSnapshot.mro": true,
+ "google.generativeai.protos.TuningSnapshot.pb": true,
+ "google.generativeai.protos.TuningSnapshot.serialize": true,
+ "google.generativeai.protos.TuningSnapshot.step": true,
+ "google.generativeai.protos.TuningSnapshot.to_dict": true,
+ "google.generativeai.protos.TuningSnapshot.to_json": true,
+ "google.generativeai.protos.TuningSnapshot.wrap": true,
+ "google.generativeai.protos.TuningTask": false,
+ "google.generativeai.protos.TuningTask.__call__": true,
+ "google.generativeai.protos.TuningTask.__eq__": true,
+ "google.generativeai.protos.TuningTask.__ge__": true,
+ "google.generativeai.protos.TuningTask.__gt__": true,
+ "google.generativeai.protos.TuningTask.__init__": true,
+ "google.generativeai.protos.TuningTask.__le__": true,
+ "google.generativeai.protos.TuningTask.__lt__": true,
+ "google.generativeai.protos.TuningTask.__ne__": true,
+ "google.generativeai.protos.TuningTask.__new__": true,
+ "google.generativeai.protos.TuningTask.__or__": true,
+ "google.generativeai.protos.TuningTask.__ror__": true,
+ "google.generativeai.protos.TuningTask.complete_time": true,
+ "google.generativeai.protos.TuningTask.copy_from": true,
+ "google.generativeai.protos.TuningTask.deserialize": true,
+ "google.generativeai.protos.TuningTask.from_json": true,
+ "google.generativeai.protos.TuningTask.hyperparameters": true,
+ "google.generativeai.protos.TuningTask.mro": true,
+ "google.generativeai.protos.TuningTask.pb": true,
+ "google.generativeai.protos.TuningTask.serialize": true,
+ "google.generativeai.protos.TuningTask.snapshots": true,
+ "google.generativeai.protos.TuningTask.start_time": true,
+ "google.generativeai.protos.TuningTask.to_dict": true,
+ "google.generativeai.protos.TuningTask.to_json": true,
+ "google.generativeai.protos.TuningTask.training_data": true,
+ "google.generativeai.protos.TuningTask.wrap": true,
+ "google.generativeai.protos.Type": false,
+ "google.generativeai.protos.Type.ARRAY": true,
+ "google.generativeai.protos.Type.BOOLEAN": true,
+ "google.generativeai.protos.Type.INTEGER": true,
+ "google.generativeai.protos.Type.NUMBER": true,
+ "google.generativeai.protos.Type.OBJECT": true,
+ "google.generativeai.protos.Type.STRING": true,
+ "google.generativeai.protos.Type.TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.Type.__abs__": true,
+ "google.generativeai.protos.Type.__add__": true,
+ "google.generativeai.protos.Type.__and__": true,
+ "google.generativeai.protos.Type.__bool__": true,
+ "google.generativeai.protos.Type.__contains__": true,
+ "google.generativeai.protos.Type.__eq__": true,
+ "google.generativeai.protos.Type.__floordiv__": true,
+ "google.generativeai.protos.Type.__ge__": true,
+ "google.generativeai.protos.Type.__getitem__": true,
+ "google.generativeai.protos.Type.__gt__": true,
+ "google.generativeai.protos.Type.__init__": true,
+ "google.generativeai.protos.Type.__invert__": true,
+ "google.generativeai.protos.Type.__iter__": true,
+ "google.generativeai.protos.Type.__le__": true,
+ "google.generativeai.protos.Type.__len__": true,
+ "google.generativeai.protos.Type.__lshift__": true,
+ "google.generativeai.protos.Type.__lt__": true,
+ "google.generativeai.protos.Type.__mod__": true,
+ "google.generativeai.protos.Type.__mul__": true,
+ "google.generativeai.protos.Type.__ne__": true,
+ "google.generativeai.protos.Type.__neg__": true,
+ "google.generativeai.protos.Type.__new__": true,
+ "google.generativeai.protos.Type.__or__": true,
+ "google.generativeai.protos.Type.__pos__": true,
+ "google.generativeai.protos.Type.__pow__": true,
+ "google.generativeai.protos.Type.__radd__": true,
+ "google.generativeai.protos.Type.__rand__": true,
+ "google.generativeai.protos.Type.__rfloordiv__": true,
+ "google.generativeai.protos.Type.__rlshift__": true,
+ "google.generativeai.protos.Type.__rmod__": true,
+ "google.generativeai.protos.Type.__rmul__": true,
+ "google.generativeai.protos.Type.__ror__": true,
+ "google.generativeai.protos.Type.__rpow__": true,
+ "google.generativeai.protos.Type.__rrshift__": true,
+ "google.generativeai.protos.Type.__rshift__": true,
+ "google.generativeai.protos.Type.__rsub__": true,
+ "google.generativeai.protos.Type.__rtruediv__": true,
+ "google.generativeai.protos.Type.__rxor__": true,
+ "google.generativeai.protos.Type.__sub__": true,
+ "google.generativeai.protos.Type.__truediv__": true,
+ "google.generativeai.protos.Type.__xor__": true,
+ "google.generativeai.protos.Type.as_integer_ratio": true,
+ "google.generativeai.protos.Type.bit_count": true,
+ "google.generativeai.protos.Type.bit_length": true,
+ "google.generativeai.protos.Type.conjugate": true,
+ "google.generativeai.protos.Type.denominator": true,
+ "google.generativeai.protos.Type.from_bytes": true,
+ "google.generativeai.protos.Type.imag": true,
+ "google.generativeai.protos.Type.numerator": true,
+ "google.generativeai.protos.Type.real": true,
+ "google.generativeai.protos.Type.to_bytes": true,
+ "google.generativeai.protos.UpdateCachedContentRequest": false,
+ "google.generativeai.protos.UpdateCachedContentRequest.__call__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__init__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__le__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__new__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__or__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.cached_content": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.from_json": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.mro": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.pb": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.serialize": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.to_json": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.update_mask": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.wrap": true,
+ "google.generativeai.protos.UpdateChunkRequest": false,
+ "google.generativeai.protos.UpdateChunkRequest.__call__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__eq__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ge__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__gt__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__init__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__le__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__lt__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ne__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__new__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__or__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ror__": true,
+ "google.generativeai.protos.UpdateChunkRequest.chunk": true,
+ "google.generativeai.protos.UpdateChunkRequest.copy_from": true,
+ "google.generativeai.protos.UpdateChunkRequest.deserialize": true,
+ "google.generativeai.protos.UpdateChunkRequest.from_json": true,
+ "google.generativeai.protos.UpdateChunkRequest.mro": true,
+ "google.generativeai.protos.UpdateChunkRequest.pb": true,
+ "google.generativeai.protos.UpdateChunkRequest.serialize": true,
+ "google.generativeai.protos.UpdateChunkRequest.to_dict": true,
+ "google.generativeai.protos.UpdateChunkRequest.to_json": true,
+ "google.generativeai.protos.UpdateChunkRequest.update_mask": true,
+ "google.generativeai.protos.UpdateChunkRequest.wrap": true,
+ "google.generativeai.protos.UpdateCorpusRequest": false,
+ "google.generativeai.protos.UpdateCorpusRequest.__call__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__eq__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ge__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__gt__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__init__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__le__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__lt__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ne__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__new__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__or__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ror__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.copy_from": true,
+ "google.generativeai.protos.UpdateCorpusRequest.corpus": true,
+ "google.generativeai.protos.UpdateCorpusRequest.deserialize": true,
+ "google.generativeai.protos.UpdateCorpusRequest.from_json": true,
+ "google.generativeai.protos.UpdateCorpusRequest.mro": true,
+ "google.generativeai.protos.UpdateCorpusRequest.pb": true,
+ "google.generativeai.protos.UpdateCorpusRequest.serialize": true,
+ "google.generativeai.protos.UpdateCorpusRequest.to_dict": true,
+ "google.generativeai.protos.UpdateCorpusRequest.to_json": true,
+ "google.generativeai.protos.UpdateCorpusRequest.update_mask": true,
+ "google.generativeai.protos.UpdateCorpusRequest.wrap": true,
+ "google.generativeai.protos.UpdateDocumentRequest": false,
+ "google.generativeai.protos.UpdateDocumentRequest.__call__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__eq__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ge__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__gt__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__init__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__le__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__lt__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ne__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__new__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__or__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ror__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.copy_from": true,
+ "google.generativeai.protos.UpdateDocumentRequest.deserialize": true,
+ "google.generativeai.protos.UpdateDocumentRequest.document": true,
+ "google.generativeai.protos.UpdateDocumentRequest.from_json": true,
+ "google.generativeai.protos.UpdateDocumentRequest.mro": true,
+ "google.generativeai.protos.UpdateDocumentRequest.pb": true,
+ "google.generativeai.protos.UpdateDocumentRequest.serialize": true,
+ "google.generativeai.protos.UpdateDocumentRequest.to_dict": true,
+ "google.generativeai.protos.UpdateDocumentRequest.to_json": true,
+ "google.generativeai.protos.UpdateDocumentRequest.update_mask": true,
+ "google.generativeai.protos.UpdateDocumentRequest.wrap": true,
+ "google.generativeai.protos.UpdatePermissionRequest": false,
+ "google.generativeai.protos.UpdatePermissionRequest.__call__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__eq__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ge__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__gt__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__init__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__le__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__lt__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ne__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__new__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__or__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ror__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.copy_from": true,
+ "google.generativeai.protos.UpdatePermissionRequest.deserialize": true,
+ "google.generativeai.protos.UpdatePermissionRequest.from_json": true,
+ "google.generativeai.protos.UpdatePermissionRequest.mro": true,
+ "google.generativeai.protos.UpdatePermissionRequest.pb": true,
+ "google.generativeai.protos.UpdatePermissionRequest.permission": true,
+ "google.generativeai.protos.UpdatePermissionRequest.serialize": true,
+ "google.generativeai.protos.UpdatePermissionRequest.to_dict": true,
+ "google.generativeai.protos.UpdatePermissionRequest.to_json": true,
+ "google.generativeai.protos.UpdatePermissionRequest.update_mask": true,
+ "google.generativeai.protos.UpdatePermissionRequest.wrap": true,
+ "google.generativeai.protos.UpdateTunedModelRequest": false,
+ "google.generativeai.protos.UpdateTunedModelRequest.__call__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__init__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__le__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__new__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__or__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.from_json": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.mro": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.pb": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.serialize": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.to_json": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.tuned_model": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.update_mask": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.wrap": true,
+ "google.generativeai.protos.VideoMetadata": false,
+ "google.generativeai.protos.VideoMetadata.__call__": true,
+ "google.generativeai.protos.VideoMetadata.__eq__": true,
+ "google.generativeai.protos.VideoMetadata.__ge__": true,
+ "google.generativeai.protos.VideoMetadata.__gt__": true,
+ "google.generativeai.protos.VideoMetadata.__init__": true,
+ "google.generativeai.protos.VideoMetadata.__le__": true,
+ "google.generativeai.protos.VideoMetadata.__lt__": true,
+ "google.generativeai.protos.VideoMetadata.__ne__": true,
+ "google.generativeai.protos.VideoMetadata.__new__": true,
+ "google.generativeai.protos.VideoMetadata.__or__": true,
+ "google.generativeai.protos.VideoMetadata.__ror__": true,
+ "google.generativeai.protos.VideoMetadata.copy_from": true,
+ "google.generativeai.protos.VideoMetadata.deserialize": true,
+ "google.generativeai.protos.VideoMetadata.from_json": true,
+ "google.generativeai.protos.VideoMetadata.mro": true,
+ "google.generativeai.protos.VideoMetadata.pb": true,
+ "google.generativeai.protos.VideoMetadata.serialize": true,
+ "google.generativeai.protos.VideoMetadata.to_dict": true,
+ "google.generativeai.protos.VideoMetadata.to_json": true,
+ "google.generativeai.protos.VideoMetadata.video_duration": true,
+ "google.generativeai.protos.VideoMetadata.wrap": true,
+ "google.generativeai.types": false,
+ "google.generativeai.types.AnyModelNameOptions": false,
+ "google.generativeai.types.AsyncGenerateContentResponse": false,
+ "google.generativeai.types.AsyncGenerateContentResponse.__eq__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__ge__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__gt__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__init__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__le__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__lt__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__ne__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__new__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.candidates": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.from_response": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.parts": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.resolve": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.text": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.to_dict": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata": true,
+ "google.generativeai.types.AuthorError": false,
+ "google.generativeai.types.AuthorError.__eq__": true,
+ "google.generativeai.types.AuthorError.__ge__": true,
+ "google.generativeai.types.AuthorError.__gt__": true,
+ "google.generativeai.types.AuthorError.__init__": true,
+ "google.generativeai.types.AuthorError.__le__": true,
+ "google.generativeai.types.AuthorError.__lt__": true,
+ "google.generativeai.types.AuthorError.__ne__": true,
+ "google.generativeai.types.AuthorError.__new__": true,
+ "google.generativeai.types.AuthorError.add_note": true,
+ "google.generativeai.types.AuthorError.args": true,
+ "google.generativeai.types.AuthorError.with_traceback": true,
+ "google.generativeai.types.BaseModelNameOptions": false,
+ "google.generativeai.types.BlobDict": false,
+ "google.generativeai.types.BlobDict.__contains__": true,
+ "google.generativeai.types.BlobDict.__eq__": true,
+ "google.generativeai.types.BlobDict.__ge__": true,
+ "google.generativeai.types.BlobDict.__getitem__": true,
+ "google.generativeai.types.BlobDict.__gt__": true,
+ "google.generativeai.types.BlobDict.__init__": true,
+ "google.generativeai.types.BlobDict.__iter__": true,
+ "google.generativeai.types.BlobDict.__le__": true,
+ "google.generativeai.types.BlobDict.__len__": true,
+ "google.generativeai.types.BlobDict.__lt__": true,
+ "google.generativeai.types.BlobDict.__ne__": true,
+ "google.generativeai.types.BlobDict.__new__": true,
+ "google.generativeai.types.BlobDict.__or__": true,
+ "google.generativeai.types.BlobDict.__ror__": true,
+ "google.generativeai.types.BlobDict.clear": true,
+ "google.generativeai.types.BlobDict.copy": true,
+ "google.generativeai.types.BlobDict.fromkeys": true,
+ "google.generativeai.types.BlobDict.get": true,
+ "google.generativeai.types.BlobDict.items": true,
+ "google.generativeai.types.BlobDict.keys": true,
+ "google.generativeai.types.BlobDict.pop": true,
+ "google.generativeai.types.BlobDict.popitem": true,
+ "google.generativeai.types.BlobDict.setdefault": true,
+ "google.generativeai.types.BlobDict.update": true,
+ "google.generativeai.types.BlobDict.values": true,
+ "google.generativeai.types.BlobType": false,
+ "google.generativeai.types.BlockedPromptException": false,
+ "google.generativeai.types.BlockedPromptException.__eq__": true,
+ "google.generativeai.types.BlockedPromptException.__ge__": true,
+ "google.generativeai.types.BlockedPromptException.__gt__": true,
+ "google.generativeai.types.BlockedPromptException.__init__": true,
+ "google.generativeai.types.BlockedPromptException.__le__": true,
+ "google.generativeai.types.BlockedPromptException.__lt__": true,
+ "google.generativeai.types.BlockedPromptException.__ne__": true,
+ "google.generativeai.types.BlockedPromptException.__new__": true,
+ "google.generativeai.types.BlockedPromptException.add_note": true,
+ "google.generativeai.types.BlockedPromptException.args": true,
+ "google.generativeai.types.BlockedPromptException.with_traceback": true,
+ "google.generativeai.types.BlockedReason": false,
+ "google.generativeai.types.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
+ "google.generativeai.types.BlockedReason.OTHER": true,
+ "google.generativeai.types.BlockedReason.SAFETY": true,
+ "google.generativeai.types.BlockedReason.__abs__": true,
+ "google.generativeai.types.BlockedReason.__add__": true,
+ "google.generativeai.types.BlockedReason.__and__": true,
+ "google.generativeai.types.BlockedReason.__bool__": true,
+ "google.generativeai.types.BlockedReason.__contains__": true,
+ "google.generativeai.types.BlockedReason.__eq__": true,
+ "google.generativeai.types.BlockedReason.__floordiv__": true,
+ "google.generativeai.types.BlockedReason.__ge__": true,
+ "google.generativeai.types.BlockedReason.__getitem__": true,
+ "google.generativeai.types.BlockedReason.__gt__": true,
+ "google.generativeai.types.BlockedReason.__init__": true,
+ "google.generativeai.types.BlockedReason.__invert__": true,
+ "google.generativeai.types.BlockedReason.__iter__": true,
+ "google.generativeai.types.BlockedReason.__le__": true,
+ "google.generativeai.types.BlockedReason.__len__": true,
+ "google.generativeai.types.BlockedReason.__lshift__": true,
+ "google.generativeai.types.BlockedReason.__lt__": true,
+ "google.generativeai.types.BlockedReason.__mod__": true,
+ "google.generativeai.types.BlockedReason.__mul__": true,
+ "google.generativeai.types.BlockedReason.__ne__": true,
+ "google.generativeai.types.BlockedReason.__neg__": true,
+ "google.generativeai.types.BlockedReason.__new__": true,
+ "google.generativeai.types.BlockedReason.__or__": true,
+ "google.generativeai.types.BlockedReason.__pos__": true,
+ "google.generativeai.types.BlockedReason.__pow__": true,
+ "google.generativeai.types.BlockedReason.__radd__": true,
+ "google.generativeai.types.BlockedReason.__rand__": true,
+ "google.generativeai.types.BlockedReason.__rfloordiv__": true,
+ "google.generativeai.types.BlockedReason.__rlshift__": true,
+ "google.generativeai.types.BlockedReason.__rmod__": true,
+ "google.generativeai.types.BlockedReason.__rmul__": true,
+ "google.generativeai.types.BlockedReason.__ror__": true,
+ "google.generativeai.types.BlockedReason.__rpow__": true,
+ "google.generativeai.types.BlockedReason.__rrshift__": true,
+ "google.generativeai.types.BlockedReason.__rshift__": true,
+ "google.generativeai.types.BlockedReason.__rsub__": true,
+ "google.generativeai.types.BlockedReason.__rtruediv__": true,
+ "google.generativeai.types.BlockedReason.__rxor__": true,
+ "google.generativeai.types.BlockedReason.__sub__": true,
+ "google.generativeai.types.BlockedReason.__truediv__": true,
+ "google.generativeai.types.BlockedReason.__xor__": true,
+ "google.generativeai.types.BlockedReason.as_integer_ratio": true,
+ "google.generativeai.types.BlockedReason.bit_count": true,
+ "google.generativeai.types.BlockedReason.bit_length": true,
+ "google.generativeai.types.BlockedReason.conjugate": true,
+ "google.generativeai.types.BlockedReason.denominator": true,
+ "google.generativeai.types.BlockedReason.from_bytes": true,
+ "google.generativeai.types.BlockedReason.imag": true,
+ "google.generativeai.types.BlockedReason.numerator": true,
+ "google.generativeai.types.BlockedReason.real": true,
+ "google.generativeai.types.BlockedReason.to_bytes": true,
+ "google.generativeai.types.BrokenResponseError": false,
+ "google.generativeai.types.BrokenResponseError.__eq__": true,
+ "google.generativeai.types.BrokenResponseError.__ge__": true,
+ "google.generativeai.types.BrokenResponseError.__gt__": true,
+ "google.generativeai.types.BrokenResponseError.__init__": true,
+ "google.generativeai.types.BrokenResponseError.__le__": true,
+ "google.generativeai.types.BrokenResponseError.__lt__": true,
+ "google.generativeai.types.BrokenResponseError.__ne__": true,
+ "google.generativeai.types.BrokenResponseError.__new__": true,
+ "google.generativeai.types.BrokenResponseError.add_note": true,
+ "google.generativeai.types.BrokenResponseError.args": true,
+ "google.generativeai.types.BrokenResponseError.with_traceback": true,
+ "google.generativeai.types.CallableFunctionDeclaration": false,
+ "google.generativeai.types.CallableFunctionDeclaration.__call__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__eq__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__ge__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__gt__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__init__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__le__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__lt__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__ne__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__new__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.description": true,
+ "google.generativeai.types.CallableFunctionDeclaration.from_function": true,
+ "google.generativeai.types.CallableFunctionDeclaration.from_proto": true,
+ "google.generativeai.types.CallableFunctionDeclaration.name": true,
+ "google.generativeai.types.CallableFunctionDeclaration.parameters": true,
+ "google.generativeai.types.CallableFunctionDeclaration.to_proto": true,
+ "google.generativeai.types.ChatResponse": false,
+ "google.generativeai.types.ChatResponse.__eq__": true,
+ "google.generativeai.types.ChatResponse.__ge__": true,
+ "google.generativeai.types.ChatResponse.__gt__": true,
+ "google.generativeai.types.ChatResponse.__init__": true,
+ "google.generativeai.types.ChatResponse.__le__": true,
+ "google.generativeai.types.ChatResponse.__lt__": true,
+ "google.generativeai.types.ChatResponse.__ne__": true,
+ "google.generativeai.types.ChatResponse.__new__": true,
+ "google.generativeai.types.ChatResponse.last": true,
+ "google.generativeai.types.ChatResponse.reply": true,
+ "google.generativeai.types.ChatResponse.to_dict": true,
+ "google.generativeai.types.ChatResponse.top_k": true,
+ "google.generativeai.types.ChatResponse.top_p": true,
+ "google.generativeai.types.CitationMetadataDict": false,
+ "google.generativeai.types.CitationMetadataDict.__contains__": true,
+ "google.generativeai.types.CitationMetadataDict.__eq__": true,
+ "google.generativeai.types.CitationMetadataDict.__ge__": true,
+ "google.generativeai.types.CitationMetadataDict.__getitem__": true,
+ "google.generativeai.types.CitationMetadataDict.__gt__": true,
+ "google.generativeai.types.CitationMetadataDict.__init__": true,
+ "google.generativeai.types.CitationMetadataDict.__iter__": true,
+ "google.generativeai.types.CitationMetadataDict.__le__": true,
+ "google.generativeai.types.CitationMetadataDict.__len__": true,
+ "google.generativeai.types.CitationMetadataDict.__lt__": true,
+ "google.generativeai.types.CitationMetadataDict.__ne__": true,
+ "google.generativeai.types.CitationMetadataDict.__new__": true,
+ "google.generativeai.types.CitationMetadataDict.__or__": true,
+ "google.generativeai.types.CitationMetadataDict.__ror__": true,
+ "google.generativeai.types.CitationMetadataDict.clear": true,
+ "google.generativeai.types.CitationMetadataDict.copy": true,
+ "google.generativeai.types.CitationMetadataDict.fromkeys": true,
+ "google.generativeai.types.CitationMetadataDict.get": true,
+ "google.generativeai.types.CitationMetadataDict.items": true,
+ "google.generativeai.types.CitationMetadataDict.keys": true,
+ "google.generativeai.types.CitationMetadataDict.pop": true,
+ "google.generativeai.types.CitationMetadataDict.popitem": true,
+ "google.generativeai.types.CitationMetadataDict.setdefault": true,
+ "google.generativeai.types.CitationMetadataDict.update": true,
+ "google.generativeai.types.CitationMetadataDict.values": true,
+ "google.generativeai.types.CitationSourceDict": false,
+ "google.generativeai.types.CitationSourceDict.__contains__": true,
+ "google.generativeai.types.CitationSourceDict.__eq__": true,
+ "google.generativeai.types.CitationSourceDict.__ge__": true,
+ "google.generativeai.types.CitationSourceDict.__getitem__": true,
+ "google.generativeai.types.CitationSourceDict.__gt__": true,
+ "google.generativeai.types.CitationSourceDict.__init__": true,
+ "google.generativeai.types.CitationSourceDict.__iter__": true,
+ "google.generativeai.types.CitationSourceDict.__le__": true,
+ "google.generativeai.types.CitationSourceDict.__len__": true,
+ "google.generativeai.types.CitationSourceDict.__lt__": true,
+ "google.generativeai.types.CitationSourceDict.__ne__": true,
+ "google.generativeai.types.CitationSourceDict.__new__": true,
+ "google.generativeai.types.CitationSourceDict.__or__": true,
+ "google.generativeai.types.CitationSourceDict.__ror__": true,
+ "google.generativeai.types.CitationSourceDict.clear": true,
+ "google.generativeai.types.CitationSourceDict.copy": true,
+ "google.generativeai.types.CitationSourceDict.fromkeys": true,
+ "google.generativeai.types.CitationSourceDict.get": true,
+ "google.generativeai.types.CitationSourceDict.items": true,
+ "google.generativeai.types.CitationSourceDict.keys": true,
+ "google.generativeai.types.CitationSourceDict.pop": true,
+ "google.generativeai.types.CitationSourceDict.popitem": true,
+ "google.generativeai.types.CitationSourceDict.setdefault": true,
+ "google.generativeai.types.CitationSourceDict.update": true,
+ "google.generativeai.types.CitationSourceDict.values": true,
+ "google.generativeai.types.Completion": false,
+ "google.generativeai.types.Completion.__eq__": true,
+ "google.generativeai.types.Completion.__ge__": true,
+ "google.generativeai.types.Completion.__gt__": true,
+ "google.generativeai.types.Completion.__init__": true,
+ "google.generativeai.types.Completion.__le__": true,
+ "google.generativeai.types.Completion.__lt__": true,
+ "google.generativeai.types.Completion.__ne__": true,
+ "google.generativeai.types.Completion.__new__": true,
+ "google.generativeai.types.Completion.to_dict": true,
+ "google.generativeai.types.ContentDict": false,
+ "google.generativeai.types.ContentDict.__contains__": true,
+ "google.generativeai.types.ContentDict.__eq__": true,
+ "google.generativeai.types.ContentDict.__ge__": true,
+ "google.generativeai.types.ContentDict.__getitem__": true,
+ "google.generativeai.types.ContentDict.__gt__": true,
+ "google.generativeai.types.ContentDict.__init__": true,
+ "google.generativeai.types.ContentDict.__iter__": true,
+ "google.generativeai.types.ContentDict.__le__": true,
+ "google.generativeai.types.ContentDict.__len__": true,
+ "google.generativeai.types.ContentDict.__lt__": true,
+ "google.generativeai.types.ContentDict.__ne__": true,
+ "google.generativeai.types.ContentDict.__new__": true,
+ "google.generativeai.types.ContentDict.__or__": true,
+ "google.generativeai.types.ContentDict.__ror__": true,
+ "google.generativeai.types.ContentDict.clear": true,
+ "google.generativeai.types.ContentDict.copy": true,
+ "google.generativeai.types.ContentDict.fromkeys": true,
+ "google.generativeai.types.ContentDict.get": true,
+ "google.generativeai.types.ContentDict.items": true,
+ "google.generativeai.types.ContentDict.keys": true,
+ "google.generativeai.types.ContentDict.pop": true,
+ "google.generativeai.types.ContentDict.popitem": true,
+ "google.generativeai.types.ContentDict.setdefault": true,
+ "google.generativeai.types.ContentDict.update": true,
+ "google.generativeai.types.ContentDict.values": true,
+ "google.generativeai.types.ContentFilterDict": false,
+ "google.generativeai.types.ContentFilterDict.__contains__": true,
+ "google.generativeai.types.ContentFilterDict.__eq__": true,
+ "google.generativeai.types.ContentFilterDict.__ge__": true,
+ "google.generativeai.types.ContentFilterDict.__getitem__": true,
+ "google.generativeai.types.ContentFilterDict.__gt__": true,
+ "google.generativeai.types.ContentFilterDict.__init__": true,
+ "google.generativeai.types.ContentFilterDict.__iter__": true,
+ "google.generativeai.types.ContentFilterDict.__le__": true,
+ "google.generativeai.types.ContentFilterDict.__len__": true,
+ "google.generativeai.types.ContentFilterDict.__lt__": true,
+ "google.generativeai.types.ContentFilterDict.__ne__": true,
+ "google.generativeai.types.ContentFilterDict.__new__": true,
+ "google.generativeai.types.ContentFilterDict.__or__": true,
+ "google.generativeai.types.ContentFilterDict.__ror__": true,
+ "google.generativeai.types.ContentFilterDict.clear": true,
+ "google.generativeai.types.ContentFilterDict.copy": true,
+ "google.generativeai.types.ContentFilterDict.fromkeys": true,
+ "google.generativeai.types.ContentFilterDict.get": true,
+ "google.generativeai.types.ContentFilterDict.items": true,
+ "google.generativeai.types.ContentFilterDict.keys": true,
+ "google.generativeai.types.ContentFilterDict.pop": true,
+ "google.generativeai.types.ContentFilterDict.popitem": true,
+ "google.generativeai.types.ContentFilterDict.setdefault": true,
+ "google.generativeai.types.ContentFilterDict.update": true,
+ "google.generativeai.types.ContentFilterDict.values": true,
+ "google.generativeai.types.ContentType": false,
+ "google.generativeai.types.ContentsType": false,
+ "google.generativeai.types.ExampleDict": false,
+ "google.generativeai.types.ExampleDict.__contains__": true,
+ "google.generativeai.types.ExampleDict.__eq__": true,
+ "google.generativeai.types.ExampleDict.__ge__": true,
+ "google.generativeai.types.ExampleDict.__getitem__": true,
+ "google.generativeai.types.ExampleDict.__gt__": true,
+ "google.generativeai.types.ExampleDict.__init__": true,
+ "google.generativeai.types.ExampleDict.__iter__": true,
+ "google.generativeai.types.ExampleDict.__le__": true,
+ "google.generativeai.types.ExampleDict.__len__": true,
+ "google.generativeai.types.ExampleDict.__lt__": true,
+ "google.generativeai.types.ExampleDict.__ne__": true,
+ "google.generativeai.types.ExampleDict.__new__": true,
+ "google.generativeai.types.ExampleDict.__or__": true,
+ "google.generativeai.types.ExampleDict.__ror__": true,
+ "google.generativeai.types.ExampleDict.clear": true,
+ "google.generativeai.types.ExampleDict.copy": true,
+ "google.generativeai.types.ExampleDict.fromkeys": true,
+ "google.generativeai.types.ExampleDict.get": true,
+ "google.generativeai.types.ExampleDict.items": true,
+ "google.generativeai.types.ExampleDict.keys": true,
+ "google.generativeai.types.ExampleDict.pop": true,
+ "google.generativeai.types.ExampleDict.popitem": true,
+ "google.generativeai.types.ExampleDict.setdefault": true,
+ "google.generativeai.types.ExampleDict.update": true,
+ "google.generativeai.types.ExampleDict.values": true,
+ "google.generativeai.types.ExampleOptions": false,
+ "google.generativeai.types.ExamplesOptions": false,
+ "google.generativeai.types.File": false,
+ "google.generativeai.types.File.__eq__": true,
+ "google.generativeai.types.File.__ge__": true,
+ "google.generativeai.types.File.__gt__": true,
+ "google.generativeai.types.File.__init__": true,
+ "google.generativeai.types.File.__le__": true,
+ "google.generativeai.types.File.__lt__": true,
+ "google.generativeai.types.File.__ne__": true,
+ "google.generativeai.types.File.__new__": true,
+ "google.generativeai.types.File.create_time": true,
+ "google.generativeai.types.File.delete": true,
+ "google.generativeai.types.File.display_name": true,
+ "google.generativeai.types.File.error": true,
+ "google.generativeai.types.File.expiration_time": true,
+ "google.generativeai.types.File.mime_type": true,
+ "google.generativeai.types.File.name": true,
+ "google.generativeai.types.File.sha256_hash": true,
+ "google.generativeai.types.File.size_bytes": true,
+ "google.generativeai.types.File.state": true,
+ "google.generativeai.types.File.to_dict": true,
+ "google.generativeai.types.File.to_proto": true,
+ "google.generativeai.types.File.update_time": true,
+ "google.generativeai.types.File.uri": true,
+ "google.generativeai.types.File.video_metadata": true,
+ "google.generativeai.types.FileDataDict": false,
+ "google.generativeai.types.FileDataDict.__contains__": true,
+ "google.generativeai.types.FileDataDict.__eq__": true,
+ "google.generativeai.types.FileDataDict.__ge__": true,
+ "google.generativeai.types.FileDataDict.__getitem__": true,
+ "google.generativeai.types.FileDataDict.__gt__": true,
+ "google.generativeai.types.FileDataDict.__init__": true,
+ "google.generativeai.types.FileDataDict.__iter__": true,
+ "google.generativeai.types.FileDataDict.__le__": true,
+ "google.generativeai.types.FileDataDict.__len__": true,
+ "google.generativeai.types.FileDataDict.__lt__": true,
+ "google.generativeai.types.FileDataDict.__ne__": true,
+ "google.generativeai.types.FileDataDict.__new__": true,
+ "google.generativeai.types.FileDataDict.__or__": true,
+ "google.generativeai.types.FileDataDict.__ror__": true,
+ "google.generativeai.types.FileDataDict.clear": true,
+ "google.generativeai.types.FileDataDict.copy": true,
+ "google.generativeai.types.FileDataDict.fromkeys": true,
+ "google.generativeai.types.FileDataDict.get": true,
+ "google.generativeai.types.FileDataDict.items": true,
+ "google.generativeai.types.FileDataDict.keys": true,
+ "google.generativeai.types.FileDataDict.pop": true,
+ "google.generativeai.types.FileDataDict.popitem": true,
+ "google.generativeai.types.FileDataDict.setdefault": true,
+ "google.generativeai.types.FileDataDict.update": true,
+ "google.generativeai.types.FileDataDict.values": true,
+ "google.generativeai.types.FileDataType": false,
+ "google.generativeai.types.FunctionDeclaration": false,
+ "google.generativeai.types.FunctionDeclaration.__eq__": true,
+ "google.generativeai.types.FunctionDeclaration.__ge__": true,
+ "google.generativeai.types.FunctionDeclaration.__gt__": true,
+ "google.generativeai.types.FunctionDeclaration.__init__": true,
+ "google.generativeai.types.FunctionDeclaration.__le__": true,
+ "google.generativeai.types.FunctionDeclaration.__lt__": true,
+ "google.generativeai.types.FunctionDeclaration.__ne__": true,
+ "google.generativeai.types.FunctionDeclaration.__new__": true,
+ "google.generativeai.types.FunctionDeclaration.description": true,
+ "google.generativeai.types.FunctionDeclaration.from_function": true,
+ "google.generativeai.types.FunctionDeclaration.from_proto": true,
+ "google.generativeai.types.FunctionDeclaration.name": true,
+ "google.generativeai.types.FunctionDeclaration.parameters": true,
+ "google.generativeai.types.FunctionDeclaration.to_proto": true,
+ "google.generativeai.types.FunctionDeclarationType": false,
+ "google.generativeai.types.FunctionLibrary": false,
+ "google.generativeai.types.FunctionLibrary.__call__": true,
+ "google.generativeai.types.FunctionLibrary.__eq__": true,
+ "google.generativeai.types.FunctionLibrary.__ge__": true,
+ "google.generativeai.types.FunctionLibrary.__getitem__": true,
+ "google.generativeai.types.FunctionLibrary.__gt__": true,
+ "google.generativeai.types.FunctionLibrary.__init__": true,
+ "google.generativeai.types.FunctionLibrary.__le__": true,
+ "google.generativeai.types.FunctionLibrary.__lt__": true,
+ "google.generativeai.types.FunctionLibrary.__ne__": true,
+ "google.generativeai.types.FunctionLibrary.__new__": true,
+ "google.generativeai.types.FunctionLibrary.to_proto": true,
+ "google.generativeai.types.FunctionLibraryType": false,
+ "google.generativeai.types.GenerateContentResponse": false,
+ "google.generativeai.types.GenerateContentResponse.__eq__": true,
+ "google.generativeai.types.GenerateContentResponse.__ge__": true,
+ "google.generativeai.types.GenerateContentResponse.__gt__": true,
+ "google.generativeai.types.GenerateContentResponse.__init__": true,
+ "google.generativeai.types.GenerateContentResponse.__iter__": true,
+ "google.generativeai.types.GenerateContentResponse.__le__": true,
+ "google.generativeai.types.GenerateContentResponse.__lt__": true,
+ "google.generativeai.types.GenerateContentResponse.__ne__": true,
+ "google.generativeai.types.GenerateContentResponse.__new__": true,
+ "google.generativeai.types.GenerateContentResponse.candidates": true,
+ "google.generativeai.types.GenerateContentResponse.from_iterator": true,
+ "google.generativeai.types.GenerateContentResponse.from_response": true,
+ "google.generativeai.types.GenerateContentResponse.parts": true,
+ "google.generativeai.types.GenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.types.GenerateContentResponse.resolve": true,
+ "google.generativeai.types.GenerateContentResponse.text": true,
+ "google.generativeai.types.GenerateContentResponse.to_dict": true,
+ "google.generativeai.types.GenerateContentResponse.usage_metadata": true,
+ "google.generativeai.types.GenerationConfig": false,
+ "google.generativeai.types.GenerationConfig.__eq__": true,
+ "google.generativeai.types.GenerationConfig.__ge__": true,
+ "google.generativeai.types.GenerationConfig.__gt__": true,
+ "google.generativeai.types.GenerationConfig.__init__": true,
+ "google.generativeai.types.GenerationConfig.__le__": true,
+ "google.generativeai.types.GenerationConfig.__lt__": true,
+ "google.generativeai.types.GenerationConfig.__ne__": true,
+ "google.generativeai.types.GenerationConfig.__new__": true,
+ "google.generativeai.types.GenerationConfig.candidate_count": true,
+ "google.generativeai.types.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.types.GenerationConfig.response_mime_type": true,
+ "google.generativeai.types.GenerationConfig.response_schema": true,
+ "google.generativeai.types.GenerationConfig.stop_sequences": true,
+ "google.generativeai.types.GenerationConfig.temperature": true,
+ "google.generativeai.types.GenerationConfig.top_k": true,
+ "google.generativeai.types.GenerationConfig.top_p": true,
+ "google.generativeai.types.GenerationConfigDict": false,
+ "google.generativeai.types.GenerationConfigDict.__contains__": true,
+ "google.generativeai.types.GenerationConfigDict.__eq__": true,
+ "google.generativeai.types.GenerationConfigDict.__ge__": true,
+ "google.generativeai.types.GenerationConfigDict.__getitem__": true,
+ "google.generativeai.types.GenerationConfigDict.__gt__": true,
+ "google.generativeai.types.GenerationConfigDict.__init__": true,
+ "google.generativeai.types.GenerationConfigDict.__iter__": true,
+ "google.generativeai.types.GenerationConfigDict.__le__": true,
+ "google.generativeai.types.GenerationConfigDict.__len__": true,
+ "google.generativeai.types.GenerationConfigDict.__lt__": true,
+ "google.generativeai.types.GenerationConfigDict.__ne__": true,
+ "google.generativeai.types.GenerationConfigDict.__new__": true,
+ "google.generativeai.types.GenerationConfigDict.__or__": true,
+ "google.generativeai.types.GenerationConfigDict.__ror__": true,
+ "google.generativeai.types.GenerationConfigDict.clear": true,
+ "google.generativeai.types.GenerationConfigDict.copy": true,
+ "google.generativeai.types.GenerationConfigDict.fromkeys": true,
+ "google.generativeai.types.GenerationConfigDict.get": true,
+ "google.generativeai.types.GenerationConfigDict.items": true,
+ "google.generativeai.types.GenerationConfigDict.keys": true,
+ "google.generativeai.types.GenerationConfigDict.pop": true,
+ "google.generativeai.types.GenerationConfigDict.popitem": true,
+ "google.generativeai.types.GenerationConfigDict.setdefault": true,
+ "google.generativeai.types.GenerationConfigDict.update": true,
+ "google.generativeai.types.GenerationConfigDict.values": true,
+ "google.generativeai.types.GenerationConfigType": false,
+ "google.generativeai.types.HarmBlockThreshold": false,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_NONE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
+ "google.generativeai.types.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
+ "google.generativeai.types.HarmBlockThreshold.__abs__": true,
+ "google.generativeai.types.HarmBlockThreshold.__add__": true,
+ "google.generativeai.types.HarmBlockThreshold.__and__": true,
+ "google.generativeai.types.HarmBlockThreshold.__bool__": true,
+ "google.generativeai.types.HarmBlockThreshold.__contains__": true,
+ "google.generativeai.types.HarmBlockThreshold.__eq__": true,
+ "google.generativeai.types.HarmBlockThreshold.__floordiv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ge__": true,
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": true,
+ "google.generativeai.types.HarmBlockThreshold.__gt__": true,
+ "google.generativeai.types.HarmBlockThreshold.__init__": true,
+ "google.generativeai.types.HarmBlockThreshold.__invert__": true,
+ "google.generativeai.types.HarmBlockThreshold.__iter__": true,
+ "google.generativeai.types.HarmBlockThreshold.__le__": true,
+ "google.generativeai.types.HarmBlockThreshold.__len__": true,
+ "google.generativeai.types.HarmBlockThreshold.__lshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__lt__": true,
+ "google.generativeai.types.HarmBlockThreshold.__mod__": true,
+ "google.generativeai.types.HarmBlockThreshold.__mul__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ne__": true,
+ "google.generativeai.types.HarmBlockThreshold.__neg__": true,
+ "google.generativeai.types.HarmBlockThreshold.__new__": true,
+ "google.generativeai.types.HarmBlockThreshold.__or__": true,
+ "google.generativeai.types.HarmBlockThreshold.__pos__": true,
+ "google.generativeai.types.HarmBlockThreshold.__pow__": true,
+ "google.generativeai.types.HarmBlockThreshold.__radd__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rand__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rlshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rmod__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rmul__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ror__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rpow__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rrshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rsub__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rtruediv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rxor__": true,
+ "google.generativeai.types.HarmBlockThreshold.__sub__": true,
+ "google.generativeai.types.HarmBlockThreshold.__truediv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__xor__": true,
+ "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": true,
+ "google.generativeai.types.HarmBlockThreshold.bit_count": true,
+ "google.generativeai.types.HarmBlockThreshold.bit_length": true,
+ "google.generativeai.types.HarmBlockThreshold.conjugate": true,
+ "google.generativeai.types.HarmBlockThreshold.denominator": true,
+ "google.generativeai.types.HarmBlockThreshold.from_bytes": true,
+ "google.generativeai.types.HarmBlockThreshold.imag": true,
+ "google.generativeai.types.HarmBlockThreshold.numerator": true,
+ "google.generativeai.types.HarmBlockThreshold.real": true,
+ "google.generativeai.types.HarmBlockThreshold.to_bytes": true,
+ "google.generativeai.types.HarmCategory": false,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
+ "google.generativeai.types.HarmCategory.__abs__": true,
+ "google.generativeai.types.HarmCategory.__add__": true,
+ "google.generativeai.types.HarmCategory.__and__": true,
+ "google.generativeai.types.HarmCategory.__bool__": true,
+ "google.generativeai.types.HarmCategory.__contains__": true,
+ "google.generativeai.types.HarmCategory.__eq__": true,
+ "google.generativeai.types.HarmCategory.__floordiv__": true,
+ "google.generativeai.types.HarmCategory.__ge__": true,
+ "google.generativeai.types.HarmCategory.__getitem__": true,
+ "google.generativeai.types.HarmCategory.__gt__": true,
+ "google.generativeai.types.HarmCategory.__init__": true,
+ "google.generativeai.types.HarmCategory.__invert__": true,
+ "google.generativeai.types.HarmCategory.__iter__": true,
+ "google.generativeai.types.HarmCategory.__le__": true,
+ "google.generativeai.types.HarmCategory.__len__": true,
+ "google.generativeai.types.HarmCategory.__lshift__": true,
+ "google.generativeai.types.HarmCategory.__lt__": true,
+ "google.generativeai.types.HarmCategory.__mod__": true,
+ "google.generativeai.types.HarmCategory.__mul__": true,
+ "google.generativeai.types.HarmCategory.__ne__": true,
+ "google.generativeai.types.HarmCategory.__neg__": true,
+ "google.generativeai.types.HarmCategory.__new__": true,
+ "google.generativeai.types.HarmCategory.__or__": true,
+ "google.generativeai.types.HarmCategory.__pos__": true,
+ "google.generativeai.types.HarmCategory.__pow__": true,
+ "google.generativeai.types.HarmCategory.__radd__": true,
+ "google.generativeai.types.HarmCategory.__rand__": true,
+ "google.generativeai.types.HarmCategory.__rfloordiv__": true,
+ "google.generativeai.types.HarmCategory.__rlshift__": true,
+ "google.generativeai.types.HarmCategory.__rmod__": true,
+ "google.generativeai.types.HarmCategory.__rmul__": true,
+ "google.generativeai.types.HarmCategory.__ror__": true,
+ "google.generativeai.types.HarmCategory.__rpow__": true,
+ "google.generativeai.types.HarmCategory.__rrshift__": true,
+ "google.generativeai.types.HarmCategory.__rshift__": true,
+ "google.generativeai.types.HarmCategory.__rsub__": true,
+ "google.generativeai.types.HarmCategory.__rtruediv__": true,
+ "google.generativeai.types.HarmCategory.__rxor__": true,
+ "google.generativeai.types.HarmCategory.__sub__": true,
+ "google.generativeai.types.HarmCategory.__truediv__": true,
+ "google.generativeai.types.HarmCategory.__xor__": true,
+ "google.generativeai.types.HarmCategory.as_integer_ratio": true,
+ "google.generativeai.types.HarmCategory.bit_count": true,
+ "google.generativeai.types.HarmCategory.bit_length": true,
+ "google.generativeai.types.HarmCategory.conjugate": true,
+ "google.generativeai.types.HarmCategory.denominator": true,
+ "google.generativeai.types.HarmCategory.from_bytes": true,
+ "google.generativeai.types.HarmCategory.imag": true,
+ "google.generativeai.types.HarmCategory.numerator": true,
+ "google.generativeai.types.HarmCategory.real": true,
+ "google.generativeai.types.HarmCategory.to_bytes": true,
+ "google.generativeai.types.HarmProbability": false,
+ "google.generativeai.types.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
+ "google.generativeai.types.HarmProbability.HIGH": true,
+ "google.generativeai.types.HarmProbability.LOW": true,
+ "google.generativeai.types.HarmProbability.MEDIUM": true,
+ "google.generativeai.types.HarmProbability.NEGLIGIBLE": true,
+ "google.generativeai.types.HarmProbability.__abs__": true,
+ "google.generativeai.types.HarmProbability.__add__": true,
+ "google.generativeai.types.HarmProbability.__and__": true,
+ "google.generativeai.types.HarmProbability.__bool__": true,
+ "google.generativeai.types.HarmProbability.__contains__": true,
+ "google.generativeai.types.HarmProbability.__eq__": true,
+ "google.generativeai.types.HarmProbability.__floordiv__": true,
+ "google.generativeai.types.HarmProbability.__ge__": true,
+ "google.generativeai.types.HarmProbability.__getitem__": true,
+ "google.generativeai.types.HarmProbability.__gt__": true,
+ "google.generativeai.types.HarmProbability.__init__": true,
+ "google.generativeai.types.HarmProbability.__invert__": true,
+ "google.generativeai.types.HarmProbability.__iter__": true,
+ "google.generativeai.types.HarmProbability.__le__": true,
+ "google.generativeai.types.HarmProbability.__len__": true,
+ "google.generativeai.types.HarmProbability.__lshift__": true,
+ "google.generativeai.types.HarmProbability.__lt__": true,
+ "google.generativeai.types.HarmProbability.__mod__": true,
+ "google.generativeai.types.HarmProbability.__mul__": true,
+ "google.generativeai.types.HarmProbability.__ne__": true,
+ "google.generativeai.types.HarmProbability.__neg__": true,
+ "google.generativeai.types.HarmProbability.__new__": true,
+ "google.generativeai.types.HarmProbability.__or__": true,
+ "google.generativeai.types.HarmProbability.__pos__": true,
+ "google.generativeai.types.HarmProbability.__pow__": true,
+ "google.generativeai.types.HarmProbability.__radd__": true,
+ "google.generativeai.types.HarmProbability.__rand__": true,
+ "google.generativeai.types.HarmProbability.__rfloordiv__": true,
+ "google.generativeai.types.HarmProbability.__rlshift__": true,
+ "google.generativeai.types.HarmProbability.__rmod__": true,
+ "google.generativeai.types.HarmProbability.__rmul__": true,
+ "google.generativeai.types.HarmProbability.__ror__": true,
+ "google.generativeai.types.HarmProbability.__rpow__": true,
+ "google.generativeai.types.HarmProbability.__rrshift__": true,
+ "google.generativeai.types.HarmProbability.__rshift__": true,
+ "google.generativeai.types.HarmProbability.__rsub__": true,
+ "google.generativeai.types.HarmProbability.__rtruediv__": true,
+ "google.generativeai.types.HarmProbability.__rxor__": true,
+ "google.generativeai.types.HarmProbability.__sub__": true,
+ "google.generativeai.types.HarmProbability.__truediv__": true,
+ "google.generativeai.types.HarmProbability.__xor__": true,
+ "google.generativeai.types.HarmProbability.as_integer_ratio": true,
+ "google.generativeai.types.HarmProbability.bit_count": true,
+ "google.generativeai.types.HarmProbability.bit_length": true,
+ "google.generativeai.types.HarmProbability.conjugate": true,
+ "google.generativeai.types.HarmProbability.denominator": true,
+ "google.generativeai.types.HarmProbability.from_bytes": true,
+ "google.generativeai.types.HarmProbability.imag": true,
+ "google.generativeai.types.HarmProbability.numerator": true,
+ "google.generativeai.types.HarmProbability.real": true,
+ "google.generativeai.types.HarmProbability.to_bytes": true,
+ "google.generativeai.types.IncompleteIterationError": false,
+ "google.generativeai.types.IncompleteIterationError.__eq__": true,
+ "google.generativeai.types.IncompleteIterationError.__ge__": true,
+ "google.generativeai.types.IncompleteIterationError.__gt__": true,
+ "google.generativeai.types.IncompleteIterationError.__init__": true,
+ "google.generativeai.types.IncompleteIterationError.__le__": true,
+ "google.generativeai.types.IncompleteIterationError.__lt__": true,
+ "google.generativeai.types.IncompleteIterationError.__ne__": true,
+ "google.generativeai.types.IncompleteIterationError.__new__": true,
+ "google.generativeai.types.IncompleteIterationError.add_note": true,
+ "google.generativeai.types.IncompleteIterationError.args": true,
+ "google.generativeai.types.IncompleteIterationError.with_traceback": true,
+ "google.generativeai.types.MessageDict": false,
+ "google.generativeai.types.MessageDict.__contains__": true,
+ "google.generativeai.types.MessageDict.__eq__": true,
+ "google.generativeai.types.MessageDict.__ge__": true,
+ "google.generativeai.types.MessageDict.__getitem__": true,
+ "google.generativeai.types.MessageDict.__gt__": true,
+ "google.generativeai.types.MessageDict.__init__": true,
+ "google.generativeai.types.MessageDict.__iter__": true,
+ "google.generativeai.types.MessageDict.__le__": true,
+ "google.generativeai.types.MessageDict.__len__": true,
+ "google.generativeai.types.MessageDict.__lt__": true,
+ "google.generativeai.types.MessageDict.__ne__": true,
+ "google.generativeai.types.MessageDict.__new__": true,
+ "google.generativeai.types.MessageDict.__or__": true,
+ "google.generativeai.types.MessageDict.__ror__": true,
+ "google.generativeai.types.MessageDict.clear": true,
+ "google.generativeai.types.MessageDict.copy": true,
+ "google.generativeai.types.MessageDict.fromkeys": true,
+ "google.generativeai.types.MessageDict.get": true,
+ "google.generativeai.types.MessageDict.items": true,
+ "google.generativeai.types.MessageDict.keys": true,
+ "google.generativeai.types.MessageDict.pop": true,
+ "google.generativeai.types.MessageDict.popitem": true,
+ "google.generativeai.types.MessageDict.setdefault": true,
+ "google.generativeai.types.MessageDict.update": true,
+ "google.generativeai.types.MessageDict.values": true,
+ "google.generativeai.types.MessageOptions": false,
+ "google.generativeai.types.MessagePromptDict": false,
+ "google.generativeai.types.MessagePromptDict.__contains__": true,
+ "google.generativeai.types.MessagePromptDict.__eq__": true,
+ "google.generativeai.types.MessagePromptDict.__ge__": true,
+ "google.generativeai.types.MessagePromptDict.__getitem__": true,
+ "google.generativeai.types.MessagePromptDict.__gt__": true,
+ "google.generativeai.types.MessagePromptDict.__init__": true,
+ "google.generativeai.types.MessagePromptDict.__iter__": true,
+ "google.generativeai.types.MessagePromptDict.__le__": true,
+ "google.generativeai.types.MessagePromptDict.__len__": true,
+ "google.generativeai.types.MessagePromptDict.__lt__": true,
+ "google.generativeai.types.MessagePromptDict.__ne__": true,
+ "google.generativeai.types.MessagePromptDict.__new__": true,
+ "google.generativeai.types.MessagePromptDict.__or__": true,
+ "google.generativeai.types.MessagePromptDict.__ror__": true,
+ "google.generativeai.types.MessagePromptDict.clear": true,
+ "google.generativeai.types.MessagePromptDict.copy": true,
+ "google.generativeai.types.MessagePromptDict.fromkeys": true,
+ "google.generativeai.types.MessagePromptDict.get": true,
+ "google.generativeai.types.MessagePromptDict.items": true,
+ "google.generativeai.types.MessagePromptDict.keys": true,
+ "google.generativeai.types.MessagePromptDict.pop": true,
+ "google.generativeai.types.MessagePromptDict.popitem": true,
+ "google.generativeai.types.MessagePromptDict.setdefault": true,
+ "google.generativeai.types.MessagePromptDict.update": true,
+ "google.generativeai.types.MessagePromptDict.values": true,
+ "google.generativeai.types.MessagePromptOptions": false,
+ "google.generativeai.types.MessagesOptions": false,
+ "google.generativeai.types.Model": false,
+ "google.generativeai.types.Model.__eq__": true,
+ "google.generativeai.types.Model.__ge__": true,
+ "google.generativeai.types.Model.__gt__": true,
+ "google.generativeai.types.Model.__init__": true,
+ "google.generativeai.types.Model.__le__": true,
+ "google.generativeai.types.Model.__lt__": true,
+ "google.generativeai.types.Model.__ne__": true,
+ "google.generativeai.types.Model.__new__": true,
+ "google.generativeai.types.Model.max_temperature": true,
+ "google.generativeai.types.Model.temperature": true,
+ "google.generativeai.types.Model.top_k": true,
+ "google.generativeai.types.Model.top_p": true,
+ "google.generativeai.types.ModelNameOptions": false,
+ "google.generativeai.types.ModelsIterable": false,
+ "google.generativeai.types.PartDict": false,
+ "google.generativeai.types.PartDict.__contains__": true,
+ "google.generativeai.types.PartDict.__eq__": true,
+ "google.generativeai.types.PartDict.__ge__": true,
+ "google.generativeai.types.PartDict.__getitem__": true,
+ "google.generativeai.types.PartDict.__gt__": true,
+ "google.generativeai.types.PartDict.__init__": true,
+ "google.generativeai.types.PartDict.__iter__": true,
+ "google.generativeai.types.PartDict.__le__": true,
+ "google.generativeai.types.PartDict.__len__": true,
+ "google.generativeai.types.PartDict.__lt__": true,
+ "google.generativeai.types.PartDict.__ne__": true,
+ "google.generativeai.types.PartDict.__new__": true,
+ "google.generativeai.types.PartDict.__or__": true,
+ "google.generativeai.types.PartDict.__ror__": true,
+ "google.generativeai.types.PartDict.clear": true,
+ "google.generativeai.types.PartDict.copy": true,
+ "google.generativeai.types.PartDict.fromkeys": true,
+ "google.generativeai.types.PartDict.get": true,
+ "google.generativeai.types.PartDict.items": true,
+ "google.generativeai.types.PartDict.keys": true,
+ "google.generativeai.types.PartDict.pop": true,
+ "google.generativeai.types.PartDict.popitem": true,
+ "google.generativeai.types.PartDict.setdefault": true,
+ "google.generativeai.types.PartDict.update": true,
+ "google.generativeai.types.PartDict.values": true,
+ "google.generativeai.types.PartType": false,
+ "google.generativeai.types.Permission": false,
+ "google.generativeai.types.Permission.__eq__": true,
+ "google.generativeai.types.Permission.__ge__": true,
+ "google.generativeai.types.Permission.__gt__": true,
+ "google.generativeai.types.Permission.__init__": true,
+ "google.generativeai.types.Permission.__le__": true,
+ "google.generativeai.types.Permission.__lt__": true,
+ "google.generativeai.types.Permission.__ne__": true,
+ "google.generativeai.types.Permission.__new__": true,
+ "google.generativeai.types.Permission.delete": true,
+ "google.generativeai.types.Permission.delete_async": true,
+ "google.generativeai.types.Permission.email_address": true,
+ "google.generativeai.types.Permission.get": true,
+ "google.generativeai.types.Permission.get_async": true,
+ "google.generativeai.types.Permission.to_dict": true,
+ "google.generativeai.types.Permission.update": true,
+ "google.generativeai.types.Permission.update_async": true,
+ "google.generativeai.types.Permissions": false,
+ "google.generativeai.types.Permissions.__eq__": true,
+ "google.generativeai.types.Permissions.__ge__": true,
+ "google.generativeai.types.Permissions.__gt__": true,
+ "google.generativeai.types.Permissions.__init__": true,
+ "google.generativeai.types.Permissions.__iter__": true,
+ "google.generativeai.types.Permissions.__le__": true,
+ "google.generativeai.types.Permissions.__lt__": true,
+ "google.generativeai.types.Permissions.__ne__": true,
+ "google.generativeai.types.Permissions.__new__": true,
+ "google.generativeai.types.Permissions.create": true,
+ "google.generativeai.types.Permissions.create_async": true,
+ "google.generativeai.types.Permissions.get": true,
+ "google.generativeai.types.Permissions.get_async": true,
+ "google.generativeai.types.Permissions.list": true,
+ "google.generativeai.types.Permissions.list_async": true,
+ "google.generativeai.types.Permissions.parent": true,
+ "google.generativeai.types.Permissions.transfer_ownership": true,
+ "google.generativeai.types.Permissions.transfer_ownership_async": true,
+ "google.generativeai.types.RequestOptions": false,
+ "google.generativeai.types.RequestOptions.__contains__": true,
+ "google.generativeai.types.RequestOptions.__eq__": true,
+ "google.generativeai.types.RequestOptions.__ge__": true,
+ "google.generativeai.types.RequestOptions.__getitem__": true,
+ "google.generativeai.types.RequestOptions.__gt__": true,
+ "google.generativeai.types.RequestOptions.__init__": true,
+ "google.generativeai.types.RequestOptions.__iter__": true,
+ "google.generativeai.types.RequestOptions.__le__": true,
+ "google.generativeai.types.RequestOptions.__len__": true,
+ "google.generativeai.types.RequestOptions.__lt__": true,
+ "google.generativeai.types.RequestOptions.__ne__": true,
+ "google.generativeai.types.RequestOptions.__new__": true,
+ "google.generativeai.types.RequestOptions.get": true,
+ "google.generativeai.types.RequestOptions.items": true,
+ "google.generativeai.types.RequestOptions.keys": true,
+ "google.generativeai.types.RequestOptions.values": true,
+ "google.generativeai.types.RequestOptionsType": false,
+ "google.generativeai.types.ResponseDict": false,
+ "google.generativeai.types.ResponseDict.__contains__": true,
+ "google.generativeai.types.ResponseDict.__eq__": true,
+ "google.generativeai.types.ResponseDict.__ge__": true,
+ "google.generativeai.types.ResponseDict.__getitem__": true,
+ "google.generativeai.types.ResponseDict.__gt__": true,
+ "google.generativeai.types.ResponseDict.__init__": true,
+ "google.generativeai.types.ResponseDict.__iter__": true,
+ "google.generativeai.types.ResponseDict.__le__": true,
+ "google.generativeai.types.ResponseDict.__len__": true,
+ "google.generativeai.types.ResponseDict.__lt__": true,
+ "google.generativeai.types.ResponseDict.__ne__": true,
+ "google.generativeai.types.ResponseDict.__new__": true,
+ "google.generativeai.types.ResponseDict.__or__": true,
+ "google.generativeai.types.ResponseDict.__ror__": true,
+ "google.generativeai.types.ResponseDict.clear": true,
+ "google.generativeai.types.ResponseDict.copy": true,
+ "google.generativeai.types.ResponseDict.fromkeys": true,
+ "google.generativeai.types.ResponseDict.get": true,
+ "google.generativeai.types.ResponseDict.items": true,
+ "google.generativeai.types.ResponseDict.keys": true,
+ "google.generativeai.types.ResponseDict.pop": true,
+ "google.generativeai.types.ResponseDict.popitem": true,
+ "google.generativeai.types.ResponseDict.setdefault": true,
+ "google.generativeai.types.ResponseDict.update": true,
+ "google.generativeai.types.ResponseDict.values": true,
+ "google.generativeai.types.SafetyFeedbackDict": false,
+ "google.generativeai.types.SafetyFeedbackDict.__contains__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__eq__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ge__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__getitem__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__gt__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__init__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__iter__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__le__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__len__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__lt__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ne__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__new__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__or__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ror__": true,
+ "google.generativeai.types.SafetyFeedbackDict.clear": true,
+ "google.generativeai.types.SafetyFeedbackDict.copy": true,
+ "google.generativeai.types.SafetyFeedbackDict.fromkeys": true,
+ "google.generativeai.types.SafetyFeedbackDict.get": true,
+ "google.generativeai.types.SafetyFeedbackDict.items": true,
+ "google.generativeai.types.SafetyFeedbackDict.keys": true,
+ "google.generativeai.types.SafetyFeedbackDict.pop": true,
+ "google.generativeai.types.SafetyFeedbackDict.popitem": true,
+ "google.generativeai.types.SafetyFeedbackDict.setdefault": true,
+ "google.generativeai.types.SafetyFeedbackDict.update": true,
+ "google.generativeai.types.SafetyFeedbackDict.values": true,
+ "google.generativeai.types.SafetyRatingDict": false,
+ "google.generativeai.types.SafetyRatingDict.__contains__": true,
+ "google.generativeai.types.SafetyRatingDict.__eq__": true,
+ "google.generativeai.types.SafetyRatingDict.__ge__": true,
+ "google.generativeai.types.SafetyRatingDict.__getitem__": true,
+ "google.generativeai.types.SafetyRatingDict.__gt__": true,
+ "google.generativeai.types.SafetyRatingDict.__init__": true,
+ "google.generativeai.types.SafetyRatingDict.__iter__": true,
+ "google.generativeai.types.SafetyRatingDict.__le__": true,
+ "google.generativeai.types.SafetyRatingDict.__len__": true,
+ "google.generativeai.types.SafetyRatingDict.__lt__": true,
+ "google.generativeai.types.SafetyRatingDict.__ne__": true,
+ "google.generativeai.types.SafetyRatingDict.__new__": true,
+ "google.generativeai.types.SafetyRatingDict.__or__": true,
+ "google.generativeai.types.SafetyRatingDict.__ror__": true,
+ "google.generativeai.types.SafetyRatingDict.clear": true,
+ "google.generativeai.types.SafetyRatingDict.copy": true,
+ "google.generativeai.types.SafetyRatingDict.fromkeys": true,
+ "google.generativeai.types.SafetyRatingDict.get": true,
+ "google.generativeai.types.SafetyRatingDict.items": true,
+ "google.generativeai.types.SafetyRatingDict.keys": true,
+ "google.generativeai.types.SafetyRatingDict.pop": true,
+ "google.generativeai.types.SafetyRatingDict.popitem": true,
+ "google.generativeai.types.SafetyRatingDict.setdefault": true,
+ "google.generativeai.types.SafetyRatingDict.update": true,
+ "google.generativeai.types.SafetyRatingDict.values": true,
+ "google.generativeai.types.SafetySettingDict": false,
+ "google.generativeai.types.SafetySettingDict.__contains__": true,
+ "google.generativeai.types.SafetySettingDict.__eq__": true,
+ "google.generativeai.types.SafetySettingDict.__ge__": true,
+ "google.generativeai.types.SafetySettingDict.__getitem__": true,
+ "google.generativeai.types.SafetySettingDict.__gt__": true,
+ "google.generativeai.types.SafetySettingDict.__init__": true,
+ "google.generativeai.types.SafetySettingDict.__iter__": true,
+ "google.generativeai.types.SafetySettingDict.__le__": true,
+ "google.generativeai.types.SafetySettingDict.__len__": true,
+ "google.generativeai.types.SafetySettingDict.__lt__": true,
+ "google.generativeai.types.SafetySettingDict.__ne__": true,
+ "google.generativeai.types.SafetySettingDict.__new__": true,
+ "google.generativeai.types.SafetySettingDict.__or__": true,
+ "google.generativeai.types.SafetySettingDict.__ror__": true,
+ "google.generativeai.types.SafetySettingDict.clear": true,
+ "google.generativeai.types.SafetySettingDict.copy": true,
+ "google.generativeai.types.SafetySettingDict.fromkeys": true,
+ "google.generativeai.types.SafetySettingDict.get": true,
+ "google.generativeai.types.SafetySettingDict.items": true,
+ "google.generativeai.types.SafetySettingDict.keys": true,
+ "google.generativeai.types.SafetySettingDict.pop": true,
+ "google.generativeai.types.SafetySettingDict.popitem": true,
+ "google.generativeai.types.SafetySettingDict.setdefault": true,
+ "google.generativeai.types.SafetySettingDict.update": true,
+ "google.generativeai.types.SafetySettingDict.values": true,
+ "google.generativeai.types.Status": false,
+ "google.generativeai.types.Status.ByteSize": true,
+ "google.generativeai.types.Status.Clear": true,
+ "google.generativeai.types.Status.ClearExtension": true,
+ "google.generativeai.types.Status.ClearField": true,
+ "google.generativeai.types.Status.CopyFrom": true,
+ "google.generativeai.types.Status.DESCRIPTOR": true,
+ "google.generativeai.types.Status.DiscardUnknownFields": true,
+ "google.generativeai.types.Status.Extensions": true,
+ "google.generativeai.types.Status.FindInitializationErrors": true,
+ "google.generativeai.types.Status.FromString": true,
+ "google.generativeai.types.Status.HasExtension": true,
+ "google.generativeai.types.Status.HasField": true,
+ "google.generativeai.types.Status.IsInitialized": true,
+ "google.generativeai.types.Status.ListFields": true,
+ "google.generativeai.types.Status.MergeFrom": true,
+ "google.generativeai.types.Status.MergeFromString": true,
+ "google.generativeai.types.Status.ParseFromString": true,
+ "google.generativeai.types.Status.RegisterExtension": true,
+ "google.generativeai.types.Status.SerializePartialToString": true,
+ "google.generativeai.types.Status.SerializeToString": true,
+ "google.generativeai.types.Status.SetInParent": true,
+ "google.generativeai.types.Status.UnknownFields": true,
+ "google.generativeai.types.Status.WhichOneof": true,
+ "google.generativeai.types.Status.__eq__": true,
+ "google.generativeai.types.Status.__ge__": true,
+ "google.generativeai.types.Status.__gt__": true,
+ "google.generativeai.types.Status.__init__": true,
+ "google.generativeai.types.Status.__le__": true,
+ "google.generativeai.types.Status.__lt__": true,
+ "google.generativeai.types.Status.__ne__": true,
+ "google.generativeai.types.Status.__new__": true,
+ "google.generativeai.types.Status.code": true,
+ "google.generativeai.types.Status.details": true,
+ "google.generativeai.types.Status.message": true,
+ "google.generativeai.types.StopCandidateException": false,
+ "google.generativeai.types.StopCandidateException.__eq__": true,
+ "google.generativeai.types.StopCandidateException.__ge__": true,
+ "google.generativeai.types.StopCandidateException.__gt__": true,
+ "google.generativeai.types.StopCandidateException.__init__": true,
+ "google.generativeai.types.StopCandidateException.__le__": true,
+ "google.generativeai.types.StopCandidateException.__lt__": true,
+ "google.generativeai.types.StopCandidateException.__ne__": true,
+ "google.generativeai.types.StopCandidateException.__new__": true,
+ "google.generativeai.types.StopCandidateException.add_note": true,
+ "google.generativeai.types.StopCandidateException.args": true,
+ "google.generativeai.types.StopCandidateException.with_traceback": true,
+ "google.generativeai.types.StrictContentType": false,
+ "google.generativeai.types.Tool": false,
+ "google.generativeai.types.Tool.__call__": true,
+ "google.generativeai.types.Tool.__eq__": true,
+ "google.generativeai.types.Tool.__ge__": true,
+ "google.generativeai.types.Tool.__getitem__": true,
+ "google.generativeai.types.Tool.__gt__": true,
+ "google.generativeai.types.Tool.__init__": true,
+ "google.generativeai.types.Tool.__le__": true,
+ "google.generativeai.types.Tool.__lt__": true,
+ "google.generativeai.types.Tool.__ne__": true,
+ "google.generativeai.types.Tool.__new__": true,
+ "google.generativeai.types.Tool.code_execution": true,
+ "google.generativeai.types.Tool.function_declarations": true,
+ "google.generativeai.types.Tool.to_proto": true,
+ "google.generativeai.types.ToolDict": false,
+ "google.generativeai.types.ToolDict.__contains__": true,
+ "google.generativeai.types.ToolDict.__eq__": true,
+ "google.generativeai.types.ToolDict.__ge__": true,
+ "google.generativeai.types.ToolDict.__getitem__": true,
+ "google.generativeai.types.ToolDict.__gt__": true,
+ "google.generativeai.types.ToolDict.__init__": true,
+ "google.generativeai.types.ToolDict.__iter__": true,
+ "google.generativeai.types.ToolDict.__le__": true,
+ "google.generativeai.types.ToolDict.__len__": true,
+ "google.generativeai.types.ToolDict.__lt__": true,
+ "google.generativeai.types.ToolDict.__ne__": true,
+ "google.generativeai.types.ToolDict.__new__": true,
+ "google.generativeai.types.ToolDict.__or__": true,
+ "google.generativeai.types.ToolDict.__ror__": true,
+ "google.generativeai.types.ToolDict.clear": true,
+ "google.generativeai.types.ToolDict.copy": true,
+ "google.generativeai.types.ToolDict.fromkeys": true,
+ "google.generativeai.types.ToolDict.get": true,
+ "google.generativeai.types.ToolDict.items": true,
+ "google.generativeai.types.ToolDict.keys": true,
+ "google.generativeai.types.ToolDict.pop": true,
+ "google.generativeai.types.ToolDict.popitem": true,
+ "google.generativeai.types.ToolDict.setdefault": true,
+ "google.generativeai.types.ToolDict.update": true,
+ "google.generativeai.types.ToolDict.values": true,
+ "google.generativeai.types.ToolsType": false,
+ "google.generativeai.types.TunedModel": false,
+ "google.generativeai.types.TunedModel.__eq__": true,
+ "google.generativeai.types.TunedModel.__ge__": true,
+ "google.generativeai.types.TunedModel.__gt__": true,
+ "google.generativeai.types.TunedModel.__init__": true,
+ "google.generativeai.types.TunedModel.__le__": true,
+ "google.generativeai.types.TunedModel.__lt__": true,
+ "google.generativeai.types.TunedModel.__ne__": true,
+ "google.generativeai.types.TunedModel.__new__": true,
+ "google.generativeai.types.TunedModel.base_model": true,
+ "google.generativeai.types.TunedModel.create_time": true,
+ "google.generativeai.types.TunedModel.description": true,
+ "google.generativeai.types.TunedModel.display_name": true,
+ "google.generativeai.types.TunedModel.name": true,
+ "google.generativeai.types.TunedModel.permissions": true,
+ "google.generativeai.types.TunedModel.source_model": true,
+ "google.generativeai.types.TunedModel.state": true,
+ "google.generativeai.types.TunedModel.temperature": true,
+ "google.generativeai.types.TunedModel.top_k": true,
+ "google.generativeai.types.TunedModel.top_p": true,
+ "google.generativeai.types.TunedModel.tuning_task": true,
+ "google.generativeai.types.TunedModel.update_time": true,
+ "google.generativeai.types.TunedModelNameOptions": false,
+ "google.generativeai.types.TunedModelState": false,
+ "google.generativeai.types.TunedModelState.ACTIVE": true,
+ "google.generativeai.types.TunedModelState.CREATING": true,
+ "google.generativeai.types.TunedModelState.FAILED": true,
+ "google.generativeai.types.TunedModelState.STATE_UNSPECIFIED": true,
+ "google.generativeai.types.TunedModelState.__abs__": true,
+ "google.generativeai.types.TunedModelState.__add__": true,
+ "google.generativeai.types.TunedModelState.__and__": true,
+ "google.generativeai.types.TunedModelState.__bool__": true,
+ "google.generativeai.types.TunedModelState.__contains__": true,
+ "google.generativeai.types.TunedModelState.__eq__": true,
+ "google.generativeai.types.TunedModelState.__floordiv__": true,
+ "google.generativeai.types.TunedModelState.__ge__": true,
+ "google.generativeai.types.TunedModelState.__getitem__": true,
+ "google.generativeai.types.TunedModelState.__gt__": true,
+ "google.generativeai.types.TunedModelState.__init__": true,
+ "google.generativeai.types.TunedModelState.__invert__": true,
+ "google.generativeai.types.TunedModelState.__iter__": true,
+ "google.generativeai.types.TunedModelState.__le__": true,
+ "google.generativeai.types.TunedModelState.__len__": true,
+ "google.generativeai.types.TunedModelState.__lshift__": true,
+ "google.generativeai.types.TunedModelState.__lt__": true,
+ "google.generativeai.types.TunedModelState.__mod__": true,
+ "google.generativeai.types.TunedModelState.__mul__": true,
+ "google.generativeai.types.TunedModelState.__ne__": true,
+ "google.generativeai.types.TunedModelState.__neg__": true,
+ "google.generativeai.types.TunedModelState.__new__": true,
+ "google.generativeai.types.TunedModelState.__or__": true,
+ "google.generativeai.types.TunedModelState.__pos__": true,
+ "google.generativeai.types.TunedModelState.__pow__": true,
+ "google.generativeai.types.TunedModelState.__radd__": true,
+ "google.generativeai.types.TunedModelState.__rand__": true,
+ "google.generativeai.types.TunedModelState.__rfloordiv__": true,
+ "google.generativeai.types.TunedModelState.__rlshift__": true,
+ "google.generativeai.types.TunedModelState.__rmod__": true,
+ "google.generativeai.types.TunedModelState.__rmul__": true,
+ "google.generativeai.types.TunedModelState.__ror__": true,
+ "google.generativeai.types.TunedModelState.__rpow__": true,
+ "google.generativeai.types.TunedModelState.__rrshift__": true,
+ "google.generativeai.types.TunedModelState.__rshift__": true,
+ "google.generativeai.types.TunedModelState.__rsub__": true,
+ "google.generativeai.types.TunedModelState.__rtruediv__": true,
+ "google.generativeai.types.TunedModelState.__rxor__": true,
+ "google.generativeai.types.TunedModelState.__sub__": true,
+ "google.generativeai.types.TunedModelState.__truediv__": true,
+ "google.generativeai.types.TunedModelState.__xor__": true,
+ "google.generativeai.types.TunedModelState.as_integer_ratio": true,
+ "google.generativeai.types.TunedModelState.bit_count": true,
+ "google.generativeai.types.TunedModelState.bit_length": true,
+ "google.generativeai.types.TunedModelState.conjugate": true,
+ "google.generativeai.types.TunedModelState.denominator": true,
+ "google.generativeai.types.TunedModelState.from_bytes": true,
+ "google.generativeai.types.TunedModelState.imag": true,
+ "google.generativeai.types.TunedModelState.numerator": true,
+ "google.generativeai.types.TunedModelState.real": true,
+ "google.generativeai.types.TunedModelState.to_bytes": true,
+ "google.generativeai.types.TypedDict": false,
+ "google.generativeai.types.annotations": true,
+ "google.generativeai.types.get_default_file_client": false,
+ "google.generativeai.types.to_file_data": false,
+ "google.generativeai.update_tuned_model": false,
+ "google.generativeai.upload_file": false
+ },
+ "link_prefix": null,
+ "physical_path": {
+ "google.generativeai": "google.generativeai",
+ "google.generativeai.ChatSession": "google.generativeai.generative_models.ChatSession",
+ "google.generativeai.ChatSession.__init__": "google.generativeai.generative_models.ChatSession.__init__",
+ "google.generativeai.ChatSession.rewind": "google.generativeai.generative_models.ChatSession.rewind",
+ "google.generativeai.ChatSession.send_message": "google.generativeai.generative_models.ChatSession.send_message",
+ "google.generativeai.ChatSession.send_message_async": "google.generativeai.generative_models.ChatSession.send_message_async",
+ "google.generativeai.GenerativeModel": "google.generativeai.generative_models.GenerativeModel",
+ "google.generativeai.GenerativeModel.__init__": "google.generativeai.generative_models.GenerativeModel.__init__",
+ "google.generativeai.GenerativeModel.count_tokens": "google.generativeai.generative_models.GenerativeModel.count_tokens",
+ "google.generativeai.GenerativeModel.count_tokens_async": "google.generativeai.generative_models.GenerativeModel.count_tokens_async",
+ "google.generativeai.GenerativeModel.from_cached_content": "google.generativeai.generative_models.GenerativeModel.from_cached_content",
+ "google.generativeai.GenerativeModel.generate_content": "google.generativeai.generative_models.GenerativeModel.generate_content",
+ "google.generativeai.GenerativeModel.generate_content_async": "google.generativeai.generative_models.GenerativeModel.generate_content_async",
+ "google.generativeai.GenerativeModel.start_chat": "google.generativeai.generative_models.GenerativeModel.start_chat",
+ "google.generativeai.chat": "google.generativeai.discuss.chat",
+ "google.generativeai.chat_async": "google.generativeai.discuss.chat_async",
+ "google.generativeai.configure": "google.generativeai.client.configure",
+ "google.generativeai.count_message_tokens": "google.generativeai.discuss.count_message_tokens",
+ "google.generativeai.count_text_tokens": "google.generativeai.text.count_text_tokens",
+ "google.generativeai.create_tuned_model": "google.generativeai.models.create_tuned_model",
+ "google.generativeai.delete_file": "google.generativeai.files.delete_file",
+ "google.generativeai.delete_tuned_model": "google.generativeai.models.delete_tuned_model",
+ "google.generativeai.embed_content": "google.generativeai.embedding.embed_content",
+ "google.generativeai.embed_content_async": "google.generativeai.embedding.embed_content_async",
+ "google.generativeai.generate_embeddings": "google.generativeai.text.generate_embeddings",
+ "google.generativeai.generate_text": "google.generativeai.text.generate_text",
+ "google.generativeai.get_base_model": "google.generativeai.models.get_base_model",
+ "google.generativeai.get_file": "google.generativeai.files.get_file",
+ "google.generativeai.get_model": "google.generativeai.models.get_model",
+ "google.generativeai.get_operation": "google.generativeai.operations.get_operation",
+ "google.generativeai.get_tuned_model": "google.generativeai.models.get_tuned_model",
+ "google.generativeai.list_files": "google.generativeai.files.list_files",
+ "google.generativeai.list_models": "google.generativeai.models.list_models",
+ "google.generativeai.list_operations": "google.generativeai.operations.list_operations",
+ "google.generativeai.list_tuned_models": "google.generativeai.models.list_tuned_models",
+ "google.generativeai.protos": "google.generativeai.protos",
+ "google.generativeai.protos.AttributionSourceId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.GroundingPassageId",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": "proto.message.Message.__eq__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": "proto.message.Message.__init__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": "proto.message.Message.__ne__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.SemanticRetrieverChunk",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.AttributionSourceId.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchCreateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksRequest",
+ "google.generativeai.protos.BatchCreateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchCreateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchCreateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchCreateChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchCreateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchCreateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchCreateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchCreateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchCreateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksResponse",
+ "google.generativeai.protos.BatchCreateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchCreateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchCreateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchCreateChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchCreateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchCreateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchCreateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchCreateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchDeleteChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchDeleteChunksRequest",
+ "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchDeleteChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchDeleteChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchDeleteChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchDeleteChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedContentsRequest": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsRequest",
+ "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedContentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedContentsResponse": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsResponse",
+ "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedContentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextRequest",
+ "google.generativeai.protos.BatchEmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextResponse",
+ "google.generativeai.protos.BatchEmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchUpdateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksRequest",
+ "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchUpdateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchUpdateChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchUpdateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchUpdateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchUpdateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksResponse",
+ "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchUpdateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchUpdateChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchUpdateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchUpdateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Blob": "google.ai.generativelanguage_v1beta.types.content.Blob",
+ "google.generativeai.protos.Blob.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Blob.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Blob.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Blob.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Blob.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Blob.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Blob.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Blob.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CachedContent": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent",
+ "google.generativeai.protos.CachedContent.UsageMetadata": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent.UsageMetadata",
+ "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CachedContent.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CachedContent.UsageMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CachedContent.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CachedContent.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CachedContent.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CachedContent.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CachedContent.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CachedContent.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CachedContent.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CachedContent.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CachedContent.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CachedContent.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate",
+ "google.generativeai.protos.Candidate.FinishReason": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate.FinishReason",
+ "google.generativeai.protos.Candidate.FinishReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Candidate.FinishReason.__eq__": "proto.enums.Enum.__eq__",
+ "google.generativeai.protos.Candidate.FinishReason.__ge__": "proto.enums.Enum.__ge__",
+ "google.generativeai.protos.Candidate.FinishReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Candidate.FinishReason.__gt__": "proto.enums.Enum.__gt__",
+ "google.generativeai.protos.Candidate.FinishReason.__init__": "enum.Enum.__init__",
+ "google.generativeai.protos.Candidate.FinishReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Candidate.FinishReason.__le__": "proto.enums.Enum.__le__",
+ "google.generativeai.protos.Candidate.FinishReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Candidate.FinishReason.__lt__": "proto.enums.Enum.__lt__",
+ "google.generativeai.protos.Candidate.FinishReason.__ne__": "proto.enums.Enum.__ne__",
+ "google.generativeai.protos.Candidate.FinishReason.__new__": "enum.Enum.__new__",
+ "google.generativeai.protos.Candidate.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Candidate.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Candidate.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Candidate.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Candidate.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Candidate.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Candidate.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Candidate.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Chunk": "google.ai.generativelanguage_v1beta.types.retriever.Chunk",
+ "google.generativeai.protos.Chunk.State": "google.ai.generativelanguage_v1beta.types.retriever.Chunk.State",
+ "google.generativeai.protos.Chunk.State.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Chunk.State.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Chunk.State.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Chunk.State.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Chunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Chunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Chunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Chunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Chunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Chunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Chunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Chunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ChunkData": "google.ai.generativelanguage_v1beta.types.retriever.ChunkData",
+ "google.generativeai.protos.ChunkData.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ChunkData.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ChunkData.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ChunkData.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ChunkData.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ChunkData.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ChunkData.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ChunkData.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CitationMetadata": "google.ai.generativelanguage_v1beta.types.citation.CitationMetadata",
+ "google.generativeai.protos.CitationMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CitationMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CitationMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CitationMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CitationMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CitationMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CitationMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CitationMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CitationSource": "google.ai.generativelanguage_v1beta.types.citation.CitationSource",
+ "google.generativeai.protos.CitationSource.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CitationSource.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CitationSource.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CitationSource.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CitationSource.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CitationSource.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CitationSource.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CitationSource.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CodeExecution": "google.ai.generativelanguage_v1beta.types.content.CodeExecution",
+ "google.generativeai.protos.CodeExecution.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CodeExecution.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CodeExecution.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CodeExecution.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CodeExecution.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CodeExecution.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CodeExecution.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CodeExecution.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CodeExecutionResult": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult",
+ "google.generativeai.protos.CodeExecutionResult.Outcome": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult.Outcome",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.CodeExecutionResult.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CodeExecutionResult.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CodeExecutionResult.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CodeExecutionResult.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CodeExecutionResult.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CodeExecutionResult.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CodeExecutionResult.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CodeExecutionResult.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Condition": "google.ai.generativelanguage_v1beta.types.retriever.Condition",
+ "google.generativeai.protos.Condition.Operator": "google.ai.generativelanguage_v1beta.types.retriever.Condition.Operator",
+ "google.generativeai.protos.Condition.Operator.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Condition.Operator.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Condition.Operator.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Condition.Operator.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Condition.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Condition.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Condition.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Condition.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Condition.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Condition.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Condition.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Condition.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Content": "google.ai.generativelanguage_v1beta.types.content.Content",
+ "google.generativeai.protos.Content.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Content.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Content.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Content.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Content.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Content.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Content.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Content.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ContentEmbedding": "google.ai.generativelanguage_v1beta.types.generative_service.ContentEmbedding",
+ "google.generativeai.protos.ContentEmbedding.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ContentEmbedding.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ContentEmbedding.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ContentEmbedding.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ContentEmbedding.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ContentEmbedding.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ContentEmbedding.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ContentEmbedding.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ContentFilter": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter",
+ "google.generativeai.protos.ContentFilter.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ContentFilter.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ContentFilter.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ContentFilter.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ContentFilter.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ContentFilter.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ContentFilter.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ContentFilter.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Corpus": "google.ai.generativelanguage_v1beta.types.retriever.Corpus",
+ "google.generativeai.protos.Corpus.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Corpus.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Corpus.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Corpus.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Corpus.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Corpus.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Corpus.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Corpus.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountMessageTokensRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensRequest",
+ "google.generativeai.protos.CountMessageTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountMessageTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountMessageTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountMessageTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountMessageTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountMessageTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountMessageTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountMessageTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountMessageTokensResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensResponse",
+ "google.generativeai.protos.CountMessageTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountMessageTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountMessageTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountMessageTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountMessageTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountMessageTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountMessageTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountMessageTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTextTokensRequest": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensRequest",
+ "google.generativeai.protos.CountTextTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTextTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTextTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTextTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTextTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTextTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTextTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTextTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTextTokensResponse": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensResponse",
+ "google.generativeai.protos.CountTextTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTextTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTextTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTextTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTextTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTextTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTextTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTextTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTokensRequest": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensRequest",
+ "google.generativeai.protos.CountTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTokensResponse": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensResponse",
+ "google.generativeai.protos.CountTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.CreateCachedContentRequest",
+ "google.generativeai.protos.CreateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateChunkRequest",
+ "google.generativeai.protos.CreateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateCorpusRequest",
+ "google.generativeai.protos.CreateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateDocumentRequest",
+ "google.generativeai.protos.CreateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileRequest",
+ "google.generativeai.protos.CreateFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateFileResponse": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileResponse",
+ "google.generativeai.protos.CreateFileResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateFileResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateFileResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateFileResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateFileResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateFileResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateFileResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateFileResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.CreatePermissionRequest",
+ "google.generativeai.protos.CreatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreatePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateTunedModelMetadata": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelMetadata",
+ "google.generativeai.protos.CreateTunedModelMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateTunedModelMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateTunedModelMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateTunedModelMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateTunedModelMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateTunedModelMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateTunedModelMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateTunedModelMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelRequest",
+ "google.generativeai.protos.CreateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CustomMetadata": "google.ai.generativelanguage_v1beta.types.retriever.CustomMetadata",
+ "google.generativeai.protos.CustomMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CustomMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CustomMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CustomMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CustomMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CustomMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CustomMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CustomMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Dataset": "google.ai.generativelanguage_v1beta.types.tuned_model.Dataset",
+ "google.generativeai.protos.Dataset.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Dataset.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Dataset.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Dataset.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Dataset.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Dataset.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Dataset.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Dataset.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.DeleteCachedContentRequest",
+ "google.generativeai.protos.DeleteCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteChunkRequest",
+ "google.generativeai.protos.DeleteChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteCorpusRequest",
+ "google.generativeai.protos.DeleteCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteDocumentRequest",
+ "google.generativeai.protos.DeleteDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.DeleteFileRequest",
+ "google.generativeai.protos.DeleteFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeletePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.DeletePermissionRequest",
+ "google.generativeai.protos.DeletePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeletePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeletePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeletePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeletePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeletePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeletePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeletePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.DeleteTunedModelRequest",
+ "google.generativeai.protos.DeleteTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Document": "google.ai.generativelanguage_v1beta.types.retriever.Document",
+ "google.generativeai.protos.Document.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Document.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Document.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Document.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Document.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Document.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Document.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Document.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentRequest",
+ "google.generativeai.protos.EmbedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentResponse",
+ "google.generativeai.protos.EmbedContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedContentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedContentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedContentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedContentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedContentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextRequest",
+ "google.generativeai.protos.EmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextResponse",
+ "google.generativeai.protos.EmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Embedding": "google.ai.generativelanguage_v1beta.types.text_service.Embedding",
+ "google.generativeai.protos.Embedding.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Embedding.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Embedding.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Embedding.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Embedding.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Embedding.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Embedding.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Embedding.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Example": "google.ai.generativelanguage_v1beta.types.discuss_service.Example",
+ "google.generativeai.protos.Example.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Example.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Example.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Example.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Example.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Example.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Example.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Example.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ExecutableCode": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode",
+ "google.generativeai.protos.ExecutableCode.Language": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode.Language",
+ "google.generativeai.protos.ExecutableCode.Language.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.ExecutableCode.Language.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.ExecutableCode.Language.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.ExecutableCode.Language.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.ExecutableCode.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ExecutableCode.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ExecutableCode.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ExecutableCode.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ExecutableCode.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ExecutableCode.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ExecutableCode.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ExecutableCode.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.File": "google.ai.generativelanguage_v1beta.types.file.File",
+ "google.generativeai.protos.File.State": "google.ai.generativelanguage_v1beta.types.file.File.State",
+ "google.generativeai.protos.File.State.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.File.State.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.File.State.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.File.State.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.File.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.File.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.File.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.File.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.File.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.File.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.File.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.File.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FileData": "google.ai.generativelanguage_v1beta.types.content.FileData",
+ "google.generativeai.protos.FileData.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FileData.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FileData.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FileData.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FileData.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FileData.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FileData.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FileData.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionCall": "google.ai.generativelanguage_v1beta.types.content.FunctionCall",
+ "google.generativeai.protos.FunctionCall.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionCall.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionCall.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionCall.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionCall.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionCall.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionCall.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionCall.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionCallingConfig": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig",
+ "google.generativeai.protos.FunctionCallingConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig.Mode",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.FunctionCallingConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionCallingConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionCallingConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionCallingConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionCallingConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionCallingConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionCallingConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionCallingConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionDeclaration": "google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration",
+ "google.generativeai.protos.FunctionDeclaration.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionDeclaration.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionDeclaration.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionDeclaration.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionDeclaration.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionDeclaration.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionDeclaration.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionDeclaration.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionResponse": "google.ai.generativelanguage_v1beta.types.content.FunctionResponse",
+ "google.generativeai.protos.FunctionResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest.AnswerStyle",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateAnswerRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback.BlockReason",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentRequest",
+ "google.generativeai.protos.GenerateContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback.BlockReason",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.UsageMetadata",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateMessageRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageRequest",
+ "google.generativeai.protos.GenerateMessageRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateMessageRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateMessageRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateMessageRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateMessageRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateMessageRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateMessageRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateMessageRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateMessageResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageResponse",
+ "google.generativeai.protos.GenerateMessageResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateMessageResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateMessageResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateMessageResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateMessageResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateMessageResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateMessageResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateMessageResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextRequest",
+ "google.generativeai.protos.GenerateTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextResponse",
+ "google.generativeai.protos.GenerateTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerationConfig": "google.ai.generativelanguage_v1beta.types.generative_service.GenerationConfig",
+ "google.generativeai.protos.GenerationConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerationConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerationConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerationConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerationConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerationConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerationConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerationConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.GetCachedContentRequest",
+ "google.generativeai.protos.GetCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetChunkRequest",
+ "google.generativeai.protos.GetChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetCorpusRequest",
+ "google.generativeai.protos.GetCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetDocumentRequest",
+ "google.generativeai.protos.GetDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.GetFileRequest",
+ "google.generativeai.protos.GetFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetModelRequest",
+ "google.generativeai.protos.GetModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetPermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.GetPermissionRequest",
+ "google.generativeai.protos.GetPermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetPermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetPermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetPermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetPermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetPermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetPermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetPermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetTunedModelRequest",
+ "google.generativeai.protos.GetTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingAttribution": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingAttribution",
+ "google.generativeai.protos.GroundingAttribution.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingAttribution.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingAttribution.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingAttribution.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingAttribution.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingAttribution.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingAttribution.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingAttribution.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingPassage": "google.ai.generativelanguage_v1beta.types.content.GroundingPassage",
+ "google.generativeai.protos.GroundingPassage.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingPassage.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingPassage.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingPassage.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingPassage.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingPassage.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingPassage.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingPassage.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingPassages": "google.ai.generativelanguage_v1beta.types.content.GroundingPassages",
+ "google.generativeai.protos.GroundingPassages.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingPassages.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingPassages.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingPassages.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingPassages.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingPassages.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingPassages.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingPassages.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.HarmCategory": "google.ai.generativelanguage_v1beta.types.safety.HarmCategory",
+ "google.generativeai.protos.HarmCategory.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.HarmCategory.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.HarmCategory.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Hyperparameters": "google.ai.generativelanguage_v1beta.types.tuned_model.Hyperparameters",
+ "google.generativeai.protos.Hyperparameters.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Hyperparameters.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Hyperparameters.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Hyperparameters.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Hyperparameters.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Hyperparameters.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Hyperparameters.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Hyperparameters.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCachedContentsRequest": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsRequest",
+ "google.generativeai.protos.ListCachedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCachedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCachedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCachedContentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCachedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCachedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCachedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCachedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCachedContentsResponse": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsResponse",
+ "google.generativeai.protos.ListCachedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCachedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCachedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCachedContentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCachedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCachedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCachedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCachedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksRequest",
+ "google.generativeai.protos.ListChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksResponse",
+ "google.generativeai.protos.ListChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCorporaRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaRequest",
+ "google.generativeai.protos.ListCorporaRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCorporaRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCorporaRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCorporaRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCorporaRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCorporaRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCorporaRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCorporaRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCorporaResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaResponse",
+ "google.generativeai.protos.ListCorporaResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCorporaResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCorporaResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCorporaResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCorporaResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCorporaResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCorporaResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCorporaResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListDocumentsRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsRequest",
+ "google.generativeai.protos.ListDocumentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListDocumentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListDocumentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListDocumentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListDocumentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListDocumentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListDocumentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListDocumentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListDocumentsResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsResponse",
+ "google.generativeai.protos.ListDocumentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListDocumentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListDocumentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListDocumentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListDocumentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListDocumentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListDocumentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListDocumentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListFilesRequest": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesRequest",
+ "google.generativeai.protos.ListFilesRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListFilesRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListFilesRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListFilesRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListFilesRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListFilesRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListFilesRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListFilesRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListFilesResponse": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesResponse",
+ "google.generativeai.protos.ListFilesResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListFilesResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListFilesResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListFilesResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListFilesResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListFilesResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListFilesResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListFilesResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsRequest",
+ "google.generativeai.protos.ListModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListModelsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListModelsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListModelsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListModelsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListModelsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsResponse",
+ "google.generativeai.protos.ListModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListModelsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListModelsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListModelsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListModelsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListModelsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListPermissionsRequest": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsRequest",
+ "google.generativeai.protos.ListPermissionsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListPermissionsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListPermissionsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListPermissionsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListPermissionsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListPermissionsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListPermissionsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListPermissionsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListPermissionsResponse": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsResponse",
+ "google.generativeai.protos.ListPermissionsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListPermissionsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListPermissionsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListPermissionsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListPermissionsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListPermissionsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListPermissionsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListPermissionsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListTunedModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsRequest",
+ "google.generativeai.protos.ListTunedModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListTunedModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListTunedModelsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListTunedModelsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListTunedModelsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListTunedModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListTunedModelsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListTunedModelsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListTunedModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsResponse",
+ "google.generativeai.protos.ListTunedModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListTunedModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListTunedModelsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListTunedModelsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListTunedModelsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListTunedModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListTunedModelsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListTunedModelsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Message": "google.ai.generativelanguage_v1beta.types.discuss_service.Message",
+ "google.generativeai.protos.Message.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Message.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Message.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Message.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Message.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Message.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Message.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Message.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.MessagePrompt": "google.ai.generativelanguage_v1beta.types.discuss_service.MessagePrompt",
+ "google.generativeai.protos.MessagePrompt.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.MessagePrompt.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.MessagePrompt.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.MessagePrompt.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.MessagePrompt.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.MessagePrompt.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.MessagePrompt.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.MessagePrompt.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.MetadataFilter": "google.ai.generativelanguage_v1beta.types.retriever.MetadataFilter",
+ "google.generativeai.protos.MetadataFilter.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.MetadataFilter.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.MetadataFilter.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.MetadataFilter.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.MetadataFilter.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.MetadataFilter.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.MetadataFilter.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.MetadataFilter.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Model": "google.ai.generativelanguage_v1beta.types.model.Model",
+ "google.generativeai.protos.Model.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Model.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Model.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Model.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Model.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Model.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Model.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Model.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Part": "google.ai.generativelanguage_v1beta.types.content.Part",
+ "google.generativeai.protos.Part.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Part.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Part.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Part.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Part.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Part.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Part.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Part.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Permission": "google.ai.generativelanguage_v1beta.types.permission.Permission",
+ "google.generativeai.protos.Permission.GranteeType": "google.ai.generativelanguage_v1beta.types.permission.Permission.GranteeType",
+ "google.generativeai.protos.Permission.GranteeType.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Permission.GranteeType.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Permission.GranteeType.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Permission.GranteeType.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Permission.Role": "google.ai.generativelanguage_v1beta.types.permission.Permission.Role",
+ "google.generativeai.protos.Permission.Role.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Permission.Role.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Permission.Role.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Permission.Role.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Permission.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Permission.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Permission.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Permission.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Permission.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Permission.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Permission.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Permission.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusRequest",
+ "google.generativeai.protos.QueryCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryCorpusResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusResponse",
+ "google.generativeai.protos.QueryCorpusResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryCorpusResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryCorpusResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryCorpusResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryCorpusResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryCorpusResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryCorpusResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryCorpusResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentRequest",
+ "google.generativeai.protos.QueryDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryDocumentResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentResponse",
+ "google.generativeai.protos.QueryDocumentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryDocumentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryDocumentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryDocumentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryDocumentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryDocumentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryDocumentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryDocumentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.RelevantChunk": "google.ai.generativelanguage_v1beta.types.retriever_service.RelevantChunk",
+ "google.generativeai.protos.RelevantChunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.RelevantChunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.RelevantChunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.RelevantChunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.RelevantChunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.RelevantChunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.RelevantChunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.RelevantChunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetyFeedback": "google.ai.generativelanguage_v1beta.types.safety.SafetyFeedback",
+ "google.generativeai.protos.SafetyFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetyFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetyFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetyFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetyFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetyFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetyFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetyFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetyRating": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating",
+ "google.generativeai.protos.SafetyRating.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetyRating.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetyRating.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetyRating.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetyRating.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetyRating.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetyRating.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetyRating.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetySetting": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting",
+ "google.generativeai.protos.SafetySetting.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetySetting.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetySetting.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetySetting.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetySetting.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetySetting.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetySetting.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetySetting.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Schema": "google.ai.generativelanguage_v1beta.types.content.Schema",
+ "google.generativeai.protos.Schema.PropertiesEntry": "google.ai.generativelanguage_v1beta.types.content.Schema.PropertiesEntry",
+ "google.generativeai.protos.Schema.PropertiesEntry.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Schema.PropertiesEntry.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Schema.PropertiesEntry.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Schema.PropertiesEntry.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Schema.PropertiesEntry.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Schema.PropertiesEntry.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Schema.PropertiesEntry.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Schema.PropertiesEntry.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Schema.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Schema.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Schema.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Schema.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Schema.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Schema.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Schema.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Schema.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SemanticRetrieverConfig": "google.ai.generativelanguage_v1beta.types.generative_service.SemanticRetrieverConfig",
+ "google.generativeai.protos.SemanticRetrieverConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SemanticRetrieverConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SemanticRetrieverConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SemanticRetrieverConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SemanticRetrieverConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SemanticRetrieverConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SemanticRetrieverConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SemanticRetrieverConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.StringList": "google.ai.generativelanguage_v1beta.types.retriever.StringList",
+ "google.generativeai.protos.StringList.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.StringList.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.StringList.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.StringList.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.StringList.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.StringList.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.StringList.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.StringList.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TaskType": "google.ai.generativelanguage_v1beta.types.generative_service.TaskType",
+ "google.generativeai.protos.TaskType.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.TaskType.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.TaskType.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.TaskType.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.TextCompletion": "google.ai.generativelanguage_v1beta.types.text_service.TextCompletion",
+ "google.generativeai.protos.TextCompletion.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TextCompletion.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TextCompletion.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TextCompletion.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TextCompletion.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TextCompletion.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TextCompletion.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TextCompletion.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TextPrompt": "google.ai.generativelanguage_v1beta.types.text_service.TextPrompt",
+ "google.generativeai.protos.TextPrompt.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TextPrompt.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TextPrompt.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TextPrompt.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TextPrompt.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TextPrompt.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TextPrompt.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TextPrompt.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Tool": "google.ai.generativelanguage_v1beta.types.content.Tool",
+ "google.generativeai.protos.Tool.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Tool.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Tool.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Tool.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Tool.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Tool.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Tool.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Tool.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ToolConfig": "google.ai.generativelanguage_v1beta.types.content.ToolConfig",
+ "google.generativeai.protos.ToolConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ToolConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ToolConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ToolConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ToolConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ToolConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ToolConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ToolConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TransferOwnershipRequest": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipRequest",
+ "google.generativeai.protos.TransferOwnershipRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TransferOwnershipRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TransferOwnershipRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TransferOwnershipRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TransferOwnershipRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TransferOwnershipRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TransferOwnershipRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TransferOwnershipRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TransferOwnershipResponse": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipResponse",
+ "google.generativeai.protos.TransferOwnershipResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TransferOwnershipResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TransferOwnershipResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TransferOwnershipResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TransferOwnershipResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TransferOwnershipResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TransferOwnershipResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TransferOwnershipResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TunedModel": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel",
+ "google.generativeai.protos.TunedModel.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TunedModel.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TunedModel.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TunedModel.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TunedModel.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TunedModel.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TunedModel.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TunedModel.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TunedModelSource": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModelSource",
+ "google.generativeai.protos.TunedModelSource.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TunedModelSource.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TunedModelSource.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TunedModelSource.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TunedModelSource.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TunedModelSource.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TunedModelSource.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TunedModelSource.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningExample": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExample",
+ "google.generativeai.protos.TuningExample.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningExample.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningExample.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningExample.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningExample.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningExample.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningExample.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningExample.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningExamples": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExamples",
+ "google.generativeai.protos.TuningExamples.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningExamples.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningExamples.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningExamples.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningExamples.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningExamples.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningExamples.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningExamples.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningSnapshot": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningSnapshot",
+ "google.generativeai.protos.TuningSnapshot.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningSnapshot.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningSnapshot.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningSnapshot.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningSnapshot.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningSnapshot.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningSnapshot.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningSnapshot.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningTask": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningTask",
+ "google.generativeai.protos.TuningTask.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningTask.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningTask.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningTask.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningTask.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningTask.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningTask.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningTask.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Type": "google.ai.generativelanguage_v1beta.types.content.Type",
+ "google.generativeai.protos.Type.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Type.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Type.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Type.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.UpdateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.UpdateCachedContentRequest",
+ "google.generativeai.protos.UpdateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateChunkRequest",
+ "google.generativeai.protos.UpdateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateCorpusRequest",
+ "google.generativeai.protos.UpdateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateDocumentRequest",
+ "google.generativeai.protos.UpdateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.UpdatePermissionRequest",
+ "google.generativeai.protos.UpdatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdatePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.UpdateTunedModelRequest",
+ "google.generativeai.protos.UpdateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.VideoMetadata": "google.ai.generativelanguage_v1beta.types.file.VideoMetadata",
+ "google.generativeai.protos.VideoMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.VideoMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.VideoMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.VideoMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.VideoMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.VideoMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.VideoMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.VideoMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.types": "google.generativeai.types",
+ "google.generativeai.types.AsyncGenerateContentResponse": "google.generativeai.types.generation_types.AsyncGenerateContentResponse",
+ "google.generativeai.types.AsyncGenerateContentResponse.__init__": "google.generativeai.types.generation_types.BaseGenerateContentResponse.__init__",
+ "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_aiterator",
+ "google.generativeai.types.AsyncGenerateContentResponse.from_response": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_response",
+ "google.generativeai.types.AsyncGenerateContentResponse.resolve": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.resolve",
+ "google.generativeai.types.AsyncGenerateContentResponse.to_dict": "google.generativeai.types.generation_types.BaseGenerateContentResponse.to_dict",
+ "google.generativeai.types.AuthorError": "google.generativeai.types.discuss_types.AuthorError",
+ "google.generativeai.types.BlobDict": "google.generativeai.types.content_types.BlobDict",
+ "google.generativeai.types.BlockedPromptException": "google.generativeai.types.generation_types.BlockedPromptException",
+ "google.generativeai.types.BlockedReason": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter.BlockedReason",
+ "google.generativeai.types.BlockedReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.BlockedReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.BlockedReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.BlockedReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.BrokenResponseError": "google.generativeai.types.generation_types.BrokenResponseError",
+ "google.generativeai.types.CallableFunctionDeclaration": "google.generativeai.types.content_types.CallableFunctionDeclaration",
+ "google.generativeai.types.CallableFunctionDeclaration.__call__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__call__",
+ "google.generativeai.types.CallableFunctionDeclaration.__init__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__init__",
+ "google.generativeai.types.CallableFunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
+ "google.generativeai.types.ChatResponse": "google.generativeai.types.discuss_types.ChatResponse",
+ "google.generativeai.types.ChatResponse.__eq__": "google.generativeai.types.discuss_types.ChatResponse.__eq__",
+ "google.generativeai.types.ChatResponse.reply": "google.generativeai.types.discuss_types.ChatResponse.reply",
+ "google.generativeai.types.ChatResponse.to_dict": "google.generativeai.types.discuss_types.ChatResponse.to_dict",
+ "google.generativeai.types.CitationMetadataDict": "google.generativeai.types.citation_types.CitationMetadataDict",
+ "google.generativeai.types.CitationSourceDict": "google.generativeai.types.citation_types.CitationSourceDict",
+ "google.generativeai.types.Completion": "google.generativeai.types.text_types.Completion",
+ "google.generativeai.types.Completion.__eq__": "google.generativeai.types.text_types.Completion.__eq__",
+ "google.generativeai.types.Completion.to_dict": "google.generativeai.types.text_types.Completion.to_dict",
+ "google.generativeai.types.ContentDict": "google.generativeai.types.content_types.ContentDict",
+ "google.generativeai.types.ContentFilterDict": "google.generativeai.types.safety_types.ContentFilterDict",
+ "google.generativeai.types.ExampleDict": "google.generativeai.types.discuss_types.ExampleDict",
+ "google.generativeai.types.File": "google.generativeai.types.file_types.File",
+ "google.generativeai.types.File.__init__": "google.generativeai.types.file_types.File.__init__",
+ "google.generativeai.types.File.delete": "google.generativeai.types.file_types.File.delete",
+ "google.generativeai.types.File.to_dict": "google.generativeai.types.file_types.File.to_dict",
+ "google.generativeai.types.File.to_proto": "google.generativeai.types.file_types.File.to_proto",
+ "google.generativeai.types.FileDataDict": "google.generativeai.types.file_types.FileDataDict",
+ "google.generativeai.types.FunctionDeclaration": "google.generativeai.types.content_types.FunctionDeclaration",
+ "google.generativeai.types.FunctionDeclaration.__init__": "google.generativeai.types.content_types.FunctionDeclaration.__init__",
+ "google.generativeai.types.FunctionDeclaration.from_function": "google.generativeai.types.content_types.FunctionDeclaration.from_function",
+ "google.generativeai.types.FunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
+ "google.generativeai.types.FunctionDeclaration.to_proto": "google.generativeai.types.content_types.FunctionDeclaration.to_proto",
+ "google.generativeai.types.FunctionLibrary": "google.generativeai.types.content_types.FunctionLibrary",
+ "google.generativeai.types.FunctionLibrary.__call__": "google.generativeai.types.content_types.FunctionLibrary.__call__",
+ "google.generativeai.types.FunctionLibrary.__getitem__": "google.generativeai.types.content_types.FunctionLibrary.__getitem__",
+ "google.generativeai.types.FunctionLibrary.__init__": "google.generativeai.types.content_types.FunctionLibrary.__init__",
+ "google.generativeai.types.FunctionLibrary.to_proto": "google.generativeai.types.content_types.FunctionLibrary.to_proto",
+ "google.generativeai.types.GenerateContentResponse": "google.generativeai.types.generation_types.GenerateContentResponse",
+ "google.generativeai.types.GenerateContentResponse.__iter__": "google.generativeai.types.generation_types.GenerateContentResponse.__iter__",
+ "google.generativeai.types.GenerateContentResponse.from_iterator": "google.generativeai.types.generation_types.GenerateContentResponse.from_iterator",
+ "google.generativeai.types.GenerateContentResponse.from_response": "google.generativeai.types.generation_types.GenerateContentResponse.from_response",
+ "google.generativeai.types.GenerateContentResponse.resolve": "google.generativeai.types.generation_types.GenerateContentResponse.resolve",
+ "google.generativeai.types.GenerationConfig": "google.generativeai.types.generation_types.GenerationConfig",
+ "google.generativeai.types.GenerationConfig.__eq__": "google.generativeai.types.generation_types.GenerationConfig.__eq__",
+ "google.generativeai.types.GenerationConfig.__init__": "google.generativeai.types.generation_types.GenerationConfig.__init__",
+ "google.generativeai.types.GenerationConfigDict": "google.generativeai.types.generation_types.GenerationConfigDict",
+ "google.generativeai.types.HarmBlockThreshold": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting.HarmBlockThreshold",
+ "google.generativeai.types.HarmBlockThreshold.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmBlockThreshold.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmBlockThreshold.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.HarmCategory": "google.generativeai.types.safety_types.HarmCategory",
+ "google.generativeai.types.HarmCategory.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmCategory.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmCategory.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.HarmProbability": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating.HarmProbability",
+ "google.generativeai.types.HarmProbability.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmProbability.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmProbability.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmProbability.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.IncompleteIterationError": "google.generativeai.types.generation_types.IncompleteIterationError",
+ "google.generativeai.types.MessageDict": "google.generativeai.types.discuss_types.MessageDict",
+ "google.generativeai.types.MessagePromptDict": "google.generativeai.types.discuss_types.MessagePromptDict",
+ "google.generativeai.types.Model": "google.generativeai.types.model_types.Model",
+ "google.generativeai.types.Model.__eq__": "google.generativeai.types.model_types.Model.__eq__",
+ "google.generativeai.types.Model.__init__": "google.generativeai.types.model_types.Model.__init__",
+ "google.generativeai.types.PartDict": "google.generativeai.types.content_types.PartDict",
+ "google.generativeai.types.Permission": "google.generativeai.types.permission_types.Permission",
+ "google.generativeai.types.Permission.__eq__": "google.generativeai.types.permission_types.Permission.__eq__",
+ "google.generativeai.types.Permission.__init__": "google.generativeai.types.permission_types.Permission.__init__",
+ "google.generativeai.types.Permission.delete": "google.generativeai.types.permission_types.Permission.delete",
+ "google.generativeai.types.Permission.delete_async": "google.generativeai.types.permission_types.Permission.delete_async",
+ "google.generativeai.types.Permission.get": "google.generativeai.types.permission_types.Permission.get",
+ "google.generativeai.types.Permission.get_async": "google.generativeai.types.permission_types.Permission.get_async",
+ "google.generativeai.types.Permission.to_dict": "google.generativeai.types.permission_types.Permission.to_dict",
+ "google.generativeai.types.Permission.update": "google.generativeai.types.permission_types.Permission.update",
+ "google.generativeai.types.Permission.update_async": "google.generativeai.types.permission_types.Permission.update_async",
+ "google.generativeai.types.Permissions": "google.generativeai.types.permission_types.Permissions",
+ "google.generativeai.types.Permissions.__init__": "google.generativeai.types.permission_types.Permissions.__init__",
+ "google.generativeai.types.Permissions.__iter__": "google.generativeai.types.permission_types.Permissions.__iter__",
+ "google.generativeai.types.Permissions.create": "google.generativeai.types.permission_types.Permissions.create",
+ "google.generativeai.types.Permissions.create_async": "google.generativeai.types.permission_types.Permissions.create_async",
+ "google.generativeai.types.Permissions.get": "google.generativeai.types.permission_types.Permissions.get",
+ "google.generativeai.types.Permissions.get_async": "google.generativeai.types.permission_types.Permissions.get_async",
+ "google.generativeai.types.Permissions.list": "google.generativeai.types.permission_types.Permissions.list",
+ "google.generativeai.types.Permissions.list_async": "google.generativeai.types.permission_types.Permissions.list_async",
+ "google.generativeai.types.Permissions.transfer_ownership": "google.generativeai.types.permission_types.Permissions.transfer_ownership",
+ "google.generativeai.types.Permissions.transfer_ownership_async": "google.generativeai.types.permission_types.Permissions.transfer_ownership_async",
+ "google.generativeai.types.RequestOptions": "google.generativeai.types.helper_types.RequestOptions",
+ "google.generativeai.types.RequestOptions.__contains__": "collections.abc.Mapping.__contains__",
+ "google.generativeai.types.RequestOptions.__eq__": "google.generativeai.types.helper_types.RequestOptions.__eq__",
+ "google.generativeai.types.RequestOptions.__getitem__": "google.generativeai.types.helper_types.RequestOptions.__getitem__",
+ "google.generativeai.types.RequestOptions.__init__": "google.generativeai.types.helper_types.RequestOptions.__init__",
+ "google.generativeai.types.RequestOptions.__iter__": "google.generativeai.types.helper_types.RequestOptions.__iter__",
+ "google.generativeai.types.RequestOptions.__len__": "google.generativeai.types.helper_types.RequestOptions.__len__",
+ "google.generativeai.types.RequestOptions.get": "collections.abc.Mapping.get",
+ "google.generativeai.types.RequestOptions.items": "collections.abc.Mapping.items",
+ "google.generativeai.types.RequestOptions.keys": "collections.abc.Mapping.keys",
+ "google.generativeai.types.RequestOptions.values": "collections.abc.Mapping.values",
+ "google.generativeai.types.ResponseDict": "google.generativeai.types.discuss_types.ResponseDict",
+ "google.generativeai.types.SafetyFeedbackDict": "google.generativeai.types.safety_types.SafetyFeedbackDict",
+ "google.generativeai.types.SafetyRatingDict": "google.generativeai.types.safety_types.SafetyRatingDict",
+ "google.generativeai.types.SafetySettingDict": "google.generativeai.types.safety_types.SafetySettingDict",
+ "google.generativeai.types.Status": "google.rpc.status_pb2.Status",
+ "google.generativeai.types.Status.RegisterExtension": "google.protobuf.message.Message.RegisterExtension",
+ "google.generativeai.types.StopCandidateException": "google.generativeai.types.generation_types.StopCandidateException",
+ "google.generativeai.types.Tool": "google.generativeai.types.content_types.Tool",
+ "google.generativeai.types.Tool.__call__": "google.generativeai.types.content_types.Tool.__call__",
+ "google.generativeai.types.Tool.__getitem__": "google.generativeai.types.content_types.Tool.__getitem__",
+ "google.generativeai.types.Tool.__init__": "google.generativeai.types.content_types.Tool.__init__",
+ "google.generativeai.types.Tool.to_proto": "google.generativeai.types.content_types.Tool.to_proto",
+ "google.generativeai.types.ToolDict": "google.generativeai.types.content_types.ToolDict",
+ "google.generativeai.types.TunedModel": "google.generativeai.types.model_types.TunedModel",
+ "google.generativeai.types.TunedModel.__eq__": "google.generativeai.types.model_types.TunedModel.__eq__",
+ "google.generativeai.types.TunedModel.__init__": "google.generativeai.types.model_types.TunedModel.__init__",
+ "google.generativeai.types.TunedModelState": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel.State",
+ "google.generativeai.types.TunedModelState.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.TunedModelState.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.TunedModelState.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.TunedModelState.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.TypedDict": "typing_extensions.TypedDict",
+ "google.generativeai.types.get_default_file_client": "google.generativeai.client.get_default_file_client",
+ "google.generativeai.types.to_file_data": "google.generativeai.types.file_types.to_file_data",
+ "google.generativeai.update_tuned_model": "google.generativeai.models.update_tuned_model",
+ "google.generativeai.upload_file": "google.generativeai.files.upload_file"
+ },
+ "py_module_names": {
+ "google.generativeai": "google.generativeai"
+ }
+}
diff --git a/docs/api/google/generativeai/_redirects.yaml b/docs/api/google/generativeai/_redirects.yaml
new file mode 100644
index 000000000..cea696430
--- /dev/null
+++ b/docs/api/google/generativeai/_redirects.yaml
@@ -0,0 +1,13 @@
+redirects:
+- from: /api/python/google/generativeai/GenerationConfig
+ to: /api/python/google/generativeai/types/GenerationConfig
+- from: /api/python/google/generativeai/protos/ContentFilter/BlockedReason
+ to: /api/python/google/generativeai/types/BlockedReason
+- from: /api/python/google/generativeai/protos/SafetyRating/HarmProbability
+ to: /api/python/google/generativeai/types/HarmProbability
+- from: /api/python/google/generativeai/protos/SafetySetting/HarmBlockThreshold
+ to: /api/python/google/generativeai/types/HarmBlockThreshold
+- from: /api/python/google/generativeai/protos/TunedModel/State
+ to: /api/python/google/generativeai/types/TunedModelState
+- from: /api/python/google/generativeai/types/ModelNameOptions
+ to: /api/python/google/generativeai/types/AnyModelNameOptions
diff --git a/docs/api/google/generativeai/_toc.yaml b/docs/api/google/generativeai/_toc.yaml
new file mode 100644
index 000000000..99797d5d8
--- /dev/null
+++ b/docs/api/google/generativeai/_toc.yaml
@@ -0,0 +1,507 @@
+toc:
+- title: google.generativeai
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai
+ - title: ChatSession
+ path: /api/python/google/generativeai/ChatSession
+ - title: GenerativeModel
+ path: /api/python/google/generativeai/GenerativeModel
+ - title: chat
+ path: /api/python/google/generativeai/chat
+ - title: chat_async
+ path: /api/python/google/generativeai/chat_async
+ - title: configure
+ path: /api/python/google/generativeai/configure
+ - title: count_message_tokens
+ path: /api/python/google/generativeai/count_message_tokens
+ - title: count_text_tokens
+ path: /api/python/google/generativeai/count_text_tokens
+ - title: create_tuned_model
+ path: /api/python/google/generativeai/create_tuned_model
+ - title: delete_file
+ path: /api/python/google/generativeai/delete_file
+ - title: delete_tuned_model
+ path: /api/python/google/generativeai/delete_tuned_model
+ - title: embed_content
+ path: /api/python/google/generativeai/embed_content
+ - title: embed_content_async
+ path: /api/python/google/generativeai/embed_content_async
+ - title: generate_embeddings
+ path: /api/python/google/generativeai/generate_embeddings
+ - title: generate_text
+ path: /api/python/google/generativeai/generate_text
+ - title: get_base_model
+ path: /api/python/google/generativeai/get_base_model
+ - title: get_file
+ path: /api/python/google/generativeai/get_file
+ - title: get_model
+ path: /api/python/google/generativeai/get_model
+ - title: get_operation
+ path: /api/python/google/generativeai/get_operation
+ - title: get_tuned_model
+ path: /api/python/google/generativeai/get_tuned_model
+ - title: list_files
+ path: /api/python/google/generativeai/list_files
+ - title: list_models
+ path: /api/python/google/generativeai/list_models
+ - title: list_operations
+ path: /api/python/google/generativeai/list_operations
+ - title: list_tuned_models
+ path: /api/python/google/generativeai/list_tuned_models
+ - title: update_tuned_model
+ path: /api/python/google/generativeai/update_tuned_model
+ - title: upload_file
+ path: /api/python/google/generativeai/upload_file
+ - title: protos
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai/protos
+ - title: AttributionSourceId
+ path: /api/python/google/generativeai/protos/AttributionSourceId
+ - title: AttributionSourceId.GroundingPassageId
+ path: /api/python/google/generativeai/protos/AttributionSourceId/GroundingPassageId
+ - title: AttributionSourceId.SemanticRetrieverChunk
+ path: /api/python/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk
+ - title: BatchCreateChunksRequest
+ path: /api/python/google/generativeai/protos/BatchCreateChunksRequest
+ - title: BatchCreateChunksResponse
+ path: /api/python/google/generativeai/protos/BatchCreateChunksResponse
+ - title: BatchDeleteChunksRequest
+ path: /api/python/google/generativeai/protos/BatchDeleteChunksRequest
+ - title: BatchEmbedContentsRequest
+ path: /api/python/google/generativeai/protos/BatchEmbedContentsRequest
+ - title: BatchEmbedContentsResponse
+ path: /api/python/google/generativeai/protos/BatchEmbedContentsResponse
+ - title: BatchEmbedTextRequest
+ path: /api/python/google/generativeai/protos/BatchEmbedTextRequest
+ - title: BatchEmbedTextResponse
+ path: /api/python/google/generativeai/protos/BatchEmbedTextResponse
+ - title: BatchUpdateChunksRequest
+ path: /api/python/google/generativeai/protos/BatchUpdateChunksRequest
+ - title: BatchUpdateChunksResponse
+ path: /api/python/google/generativeai/protos/BatchUpdateChunksResponse
+ - title: Blob
+ path: /api/python/google/generativeai/protos/Blob
+ - title: CachedContent
+ path: /api/python/google/generativeai/protos/CachedContent
+ - title: CachedContent.UsageMetadata
+ path: /api/python/google/generativeai/protos/CachedContent/UsageMetadata
+ - title: Candidate
+ path: /api/python/google/generativeai/protos/Candidate
+ - title: Candidate.FinishReason
+ path: /api/python/google/generativeai/protos/Candidate/FinishReason
+ - title: Chunk
+ path: /api/python/google/generativeai/protos/Chunk
+ - title: Chunk.State
+ path: /api/python/google/generativeai/protos/Chunk/State
+ - title: ChunkData
+ path: /api/python/google/generativeai/protos/ChunkData
+ - title: CitationMetadata
+ path: /api/python/google/generativeai/protos/CitationMetadata
+ - title: CitationSource
+ path: /api/python/google/generativeai/protos/CitationSource
+ - title: CodeExecution
+ path: /api/python/google/generativeai/protos/CodeExecution
+ - title: CodeExecutionResult
+ path: /api/python/google/generativeai/protos/CodeExecutionResult
+ - title: CodeExecutionResult.Outcome
+ path: /api/python/google/generativeai/protos/CodeExecutionResult/Outcome
+ - title: Condition
+ path: /api/python/google/generativeai/protos/Condition
+ - title: Condition.Operator
+ path: /api/python/google/generativeai/protos/Condition/Operator
+ - title: Content
+ path: /api/python/google/generativeai/protos/Content
+ - title: ContentEmbedding
+ path: /api/python/google/generativeai/protos/ContentEmbedding
+ - title: ContentFilter
+ path: /api/python/google/generativeai/protos/ContentFilter
+ - title: Corpus
+ path: /api/python/google/generativeai/protos/Corpus
+ - title: CountMessageTokensRequest
+ path: /api/python/google/generativeai/protos/CountMessageTokensRequest
+ - title: CountMessageTokensResponse
+ path: /api/python/google/generativeai/protos/CountMessageTokensResponse
+ - title: CountTextTokensRequest
+ path: /api/python/google/generativeai/protos/CountTextTokensRequest
+ - title: CountTextTokensResponse
+ path: /api/python/google/generativeai/protos/CountTextTokensResponse
+ - title: CountTokensRequest
+ path: /api/python/google/generativeai/protos/CountTokensRequest
+ - title: CountTokensResponse
+ path: /api/python/google/generativeai/protos/CountTokensResponse
+ - title: CreateCachedContentRequest
+ path: /api/python/google/generativeai/protos/CreateCachedContentRequest
+ - title: CreateChunkRequest
+ path: /api/python/google/generativeai/protos/CreateChunkRequest
+ - title: CreateCorpusRequest
+ path: /api/python/google/generativeai/protos/CreateCorpusRequest
+ - title: CreateDocumentRequest
+ path: /api/python/google/generativeai/protos/CreateDocumentRequest
+ - title: CreateFileRequest
+ path: /api/python/google/generativeai/protos/CreateFileRequest
+ - title: CreateFileResponse
+ path: /api/python/google/generativeai/protos/CreateFileResponse
+ - title: CreatePermissionRequest
+ path: /api/python/google/generativeai/protos/CreatePermissionRequest
+ - title: CreateTunedModelMetadata
+ path: /api/python/google/generativeai/protos/CreateTunedModelMetadata
+ - title: CreateTunedModelRequest
+ path: /api/python/google/generativeai/protos/CreateTunedModelRequest
+ - title: CustomMetadata
+ path: /api/python/google/generativeai/protos/CustomMetadata
+ - title: Dataset
+ path: /api/python/google/generativeai/protos/Dataset
+ - title: DeleteCachedContentRequest
+ path: /api/python/google/generativeai/protos/DeleteCachedContentRequest
+ - title: DeleteChunkRequest
+ path: /api/python/google/generativeai/protos/DeleteChunkRequest
+ - title: DeleteCorpusRequest
+ path: /api/python/google/generativeai/protos/DeleteCorpusRequest
+ - title: DeleteDocumentRequest
+ path: /api/python/google/generativeai/protos/DeleteDocumentRequest
+ - title: DeleteFileRequest
+ path: /api/python/google/generativeai/protos/DeleteFileRequest
+ - title: DeletePermissionRequest
+ path: /api/python/google/generativeai/protos/DeletePermissionRequest
+ - title: DeleteTunedModelRequest
+ path: /api/python/google/generativeai/protos/DeleteTunedModelRequest
+ - title: Document
+ path: /api/python/google/generativeai/protos/Document
+ - title: EmbedContentRequest
+ path: /api/python/google/generativeai/protos/EmbedContentRequest
+ - title: EmbedContentResponse
+ path: /api/python/google/generativeai/protos/EmbedContentResponse
+ - title: EmbedTextRequest
+ path: /api/python/google/generativeai/protos/EmbedTextRequest
+ - title: EmbedTextResponse
+ path: /api/python/google/generativeai/protos/EmbedTextResponse
+ - title: Embedding
+ path: /api/python/google/generativeai/protos/Embedding
+ - title: Example
+ path: /api/python/google/generativeai/protos/Example
+ - title: ExecutableCode
+ path: /api/python/google/generativeai/protos/ExecutableCode
+ - title: ExecutableCode.Language
+ path: /api/python/google/generativeai/protos/ExecutableCode/Language
+ - title: File
+ path: /api/python/google/generativeai/protos/File
+ - title: File.State
+ path: /api/python/google/generativeai/protos/File/State
+ - title: FileData
+ path: /api/python/google/generativeai/protos/FileData
+ - title: FunctionCall
+ path: /api/python/google/generativeai/protos/FunctionCall
+ - title: FunctionCallingConfig
+ path: /api/python/google/generativeai/protos/FunctionCallingConfig
+ - title: FunctionCallingConfig.Mode
+ path: /api/python/google/generativeai/protos/FunctionCallingConfig/Mode
+ - title: FunctionDeclaration
+ path: /api/python/google/generativeai/protos/FunctionDeclaration
+ - title: FunctionResponse
+ path: /api/python/google/generativeai/protos/FunctionResponse
+ - title: GenerateAnswerRequest
+ path: /api/python/google/generativeai/protos/GenerateAnswerRequest
+ - title: GenerateAnswerRequest.AnswerStyle
+ path: /api/python/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle
+ - title: GenerateAnswerResponse
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse
+ - title: GenerateAnswerResponse.InputFeedback
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback
+ - title: GenerateAnswerResponse.InputFeedback.BlockReason
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason
+ - title: GenerateContentRequest
+ path: /api/python/google/generativeai/protos/GenerateContentRequest
+ - title: GenerateContentResponse
+ path: /api/python/google/generativeai/protos/GenerateContentResponse
+ - title: GenerateContentResponse.PromptFeedback
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback
+ - title: GenerateContentResponse.PromptFeedback.BlockReason
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason
+ - title: GenerateContentResponse.UsageMetadata
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/UsageMetadata
+ - title: GenerateMessageRequest
+ path: /api/python/google/generativeai/protos/GenerateMessageRequest
+ - title: GenerateMessageResponse
+ path: /api/python/google/generativeai/protos/GenerateMessageResponse
+ - title: GenerateTextRequest
+ path: /api/python/google/generativeai/protos/GenerateTextRequest
+ - title: GenerateTextResponse
+ path: /api/python/google/generativeai/protos/GenerateTextResponse
+ - title: GenerationConfig
+ path: /api/python/google/generativeai/protos/GenerationConfig
+ - title: GetCachedContentRequest
+ path: /api/python/google/generativeai/protos/GetCachedContentRequest
+ - title: GetChunkRequest
+ path: /api/python/google/generativeai/protos/GetChunkRequest
+ - title: GetCorpusRequest
+ path: /api/python/google/generativeai/protos/GetCorpusRequest
+ - title: GetDocumentRequest
+ path: /api/python/google/generativeai/protos/GetDocumentRequest
+ - title: GetFileRequest
+ path: /api/python/google/generativeai/protos/GetFileRequest
+ - title: GetModelRequest
+ path: /api/python/google/generativeai/protos/GetModelRequest
+ - title: GetPermissionRequest
+ path: /api/python/google/generativeai/protos/GetPermissionRequest
+ - title: GetTunedModelRequest
+ path: /api/python/google/generativeai/protos/GetTunedModelRequest
+ - title: GroundingAttribution
+ path: /api/python/google/generativeai/protos/GroundingAttribution
+ - title: GroundingPassage
+ path: /api/python/google/generativeai/protos/GroundingPassage
+ - title: GroundingPassages
+ path: /api/python/google/generativeai/protos/GroundingPassages
+ - title: HarmCategory
+ path: /api/python/google/generativeai/protos/HarmCategory
+ - title: Hyperparameters
+ path: /api/python/google/generativeai/protos/Hyperparameters
+ - title: ListCachedContentsRequest
+ path: /api/python/google/generativeai/protos/ListCachedContentsRequest
+ - title: ListCachedContentsResponse
+ path: /api/python/google/generativeai/protos/ListCachedContentsResponse
+ - title: ListChunksRequest
+ path: /api/python/google/generativeai/protos/ListChunksRequest
+ - title: ListChunksResponse
+ path: /api/python/google/generativeai/protos/ListChunksResponse
+ - title: ListCorporaRequest
+ path: /api/python/google/generativeai/protos/ListCorporaRequest
+ - title: ListCorporaResponse
+ path: /api/python/google/generativeai/protos/ListCorporaResponse
+ - title: ListDocumentsRequest
+ path: /api/python/google/generativeai/protos/ListDocumentsRequest
+ - title: ListDocumentsResponse
+ path: /api/python/google/generativeai/protos/ListDocumentsResponse
+ - title: ListFilesRequest
+ path: /api/python/google/generativeai/protos/ListFilesRequest
+ - title: ListFilesResponse
+ path: /api/python/google/generativeai/protos/ListFilesResponse
+ - title: ListModelsRequest
+ path: /api/python/google/generativeai/protos/ListModelsRequest
+ - title: ListModelsResponse
+ path: /api/python/google/generativeai/protos/ListModelsResponse
+ - title: ListPermissionsRequest
+ path: /api/python/google/generativeai/protos/ListPermissionsRequest
+ - title: ListPermissionsResponse
+ path: /api/python/google/generativeai/protos/ListPermissionsResponse
+ - title: ListTunedModelsRequest
+ path: /api/python/google/generativeai/protos/ListTunedModelsRequest
+ - title: ListTunedModelsResponse
+ path: /api/python/google/generativeai/protos/ListTunedModelsResponse
+ - title: Message
+ path: /api/python/google/generativeai/protos/Message
+ - title: MessagePrompt
+ path: /api/python/google/generativeai/protos/MessagePrompt
+ - title: MetadataFilter
+ path: /api/python/google/generativeai/protos/MetadataFilter
+ - title: Model
+ path: /api/python/google/generativeai/protos/Model
+ - title: Part
+ path: /api/python/google/generativeai/protos/Part
+ - title: Permission
+ path: /api/python/google/generativeai/protos/Permission
+ - title: Permission.GranteeType
+ path: /api/python/google/generativeai/protos/Permission/GranteeType
+ - title: Permission.Role
+ path: /api/python/google/generativeai/protos/Permission/Role
+ - title: QueryCorpusRequest
+ path: /api/python/google/generativeai/protos/QueryCorpusRequest
+ - title: QueryCorpusResponse
+ path: /api/python/google/generativeai/protos/QueryCorpusResponse
+ - title: QueryDocumentRequest
+ path: /api/python/google/generativeai/protos/QueryDocumentRequest
+ - title: QueryDocumentResponse
+ path: /api/python/google/generativeai/protos/QueryDocumentResponse
+ - title: RelevantChunk
+ path: /api/python/google/generativeai/protos/RelevantChunk
+ - title: SafetyFeedback
+ path: /api/python/google/generativeai/protos/SafetyFeedback
+ - title: SafetyRating
+ path: /api/python/google/generativeai/protos/SafetyRating
+ - title: SafetySetting
+ path: /api/python/google/generativeai/protos/SafetySetting
+ - title: Schema
+ path: /api/python/google/generativeai/protos/Schema
+ - title: Schema.PropertiesEntry
+ path: /api/python/google/generativeai/protos/Schema/PropertiesEntry
+ - title: SemanticRetrieverConfig
+ path: /api/python/google/generativeai/protos/SemanticRetrieverConfig
+ - title: StringList
+ path: /api/python/google/generativeai/protos/StringList
+ - title: TaskType
+ path: /api/python/google/generativeai/protos/TaskType
+ - title: TextCompletion
+ path: /api/python/google/generativeai/protos/TextCompletion
+ - title: TextPrompt
+ path: /api/python/google/generativeai/protos/TextPrompt
+ - title: Tool
+ path: /api/python/google/generativeai/protos/Tool
+ - title: ToolConfig
+ path: /api/python/google/generativeai/protos/ToolConfig
+ - title: TransferOwnershipRequest
+ path: /api/python/google/generativeai/protos/TransferOwnershipRequest
+ - title: TransferOwnershipResponse
+ path: /api/python/google/generativeai/protos/TransferOwnershipResponse
+ - title: TunedModel
+ path: /api/python/google/generativeai/protos/TunedModel
+ - title: TunedModelSource
+ path: /api/python/google/generativeai/protos/TunedModelSource
+ - title: TuningExample
+ path: /api/python/google/generativeai/protos/TuningExample
+ - title: TuningExamples
+ path: /api/python/google/generativeai/protos/TuningExamples
+ - title: TuningSnapshot
+ path: /api/python/google/generativeai/protos/TuningSnapshot
+ - title: TuningTask
+ path: /api/python/google/generativeai/protos/TuningTask
+ - title: Type
+ path: /api/python/google/generativeai/protos/Type
+ - title: UpdateCachedContentRequest
+ path: /api/python/google/generativeai/protos/UpdateCachedContentRequest
+ - title: UpdateChunkRequest
+ path: /api/python/google/generativeai/protos/UpdateChunkRequest
+ - title: UpdateCorpusRequest
+ path: /api/python/google/generativeai/protos/UpdateCorpusRequest
+ - title: UpdateDocumentRequest
+ path: /api/python/google/generativeai/protos/UpdateDocumentRequest
+ - title: UpdatePermissionRequest
+ path: /api/python/google/generativeai/protos/UpdatePermissionRequest
+ - title: UpdateTunedModelRequest
+ path: /api/python/google/generativeai/protos/UpdateTunedModelRequest
+ - title: VideoMetadata
+ path: /api/python/google/generativeai/protos/VideoMetadata
+ - title: types
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai/types
+ - title: AnyModelNameOptions
+ path: /api/python/google/generativeai/types/AnyModelNameOptions
+ - title: AsyncGenerateContentResponse
+ path: /api/python/google/generativeai/types/AsyncGenerateContentResponse
+ - title: AuthorError
+ path: /api/python/google/generativeai/types/AuthorError
+ - title: BaseModelNameOptions
+ path: /api/python/google/generativeai/types/BaseModelNameOptions
+ - title: BlobDict
+ path: /api/python/google/generativeai/types/BlobDict
+ - title: BlobType
+ path: /api/python/google/generativeai/types/BlobType
+ - title: BlockedPromptException
+ path: /api/python/google/generativeai/types/BlockedPromptException
+ - title: BlockedReason
+ path: /api/python/google/generativeai/types/BlockedReason
+ - title: BrokenResponseError
+ path: /api/python/google/generativeai/types/BrokenResponseError
+ - title: CallableFunctionDeclaration
+ path: /api/python/google/generativeai/types/CallableFunctionDeclaration
+ - title: ChatResponse
+ path: /api/python/google/generativeai/types/ChatResponse
+ - title: CitationMetadataDict
+ path: /api/python/google/generativeai/types/CitationMetadataDict
+ - title: CitationSourceDict
+ path: /api/python/google/generativeai/types/CitationSourceDict
+ - title: Completion
+ path: /api/python/google/generativeai/types/Completion
+ - title: ContentDict
+ path: /api/python/google/generativeai/types/ContentDict
+ - title: ContentFilterDict
+ path: /api/python/google/generativeai/types/ContentFilterDict
+ - title: ContentType
+ path: /api/python/google/generativeai/types/ContentType
+ - title: ContentsType
+ path: /api/python/google/generativeai/types/ContentsType
+ - title: ExampleDict
+ path: /api/python/google/generativeai/types/ExampleDict
+ - title: ExampleOptions
+ path: /api/python/google/generativeai/types/ExampleOptions
+ - title: ExamplesOptions
+ path: /api/python/google/generativeai/types/ExamplesOptions
+ - title: File
+ path: /api/python/google/generativeai/types/File
+ - title: FileDataDict
+ path: /api/python/google/generativeai/types/FileDataDict
+ - title: FileDataType
+ path: /api/python/google/generativeai/types/FileDataType
+ - title: FunctionDeclaration
+ path: /api/python/google/generativeai/types/FunctionDeclaration
+ - title: FunctionDeclarationType
+ path: /api/python/google/generativeai/types/FunctionDeclarationType
+ - title: FunctionLibrary
+ path: /api/python/google/generativeai/types/FunctionLibrary
+ - title: FunctionLibraryType
+ path: /api/python/google/generativeai/types/FunctionLibraryType
+ - title: GenerateContentResponse
+ path: /api/python/google/generativeai/types/GenerateContentResponse
+ - title: GenerationConfig
+ path: /api/python/google/generativeai/types/GenerationConfig
+ - title: GenerationConfigDict
+ path: /api/python/google/generativeai/types/GenerationConfigDict
+ - title: GenerationConfigType
+ path: /api/python/google/generativeai/types/GenerationConfigType
+ - title: HarmBlockThreshold
+ path: /api/python/google/generativeai/types/HarmBlockThreshold
+ - title: HarmCategory
+ path: /api/python/google/generativeai/types/HarmCategory
+ - title: HarmProbability
+ path: /api/python/google/generativeai/types/HarmProbability
+ - title: IncompleteIterationError
+ path: /api/python/google/generativeai/types/IncompleteIterationError
+ - title: MessageDict
+ path: /api/python/google/generativeai/types/MessageDict
+ - title: MessageOptions
+ path: /api/python/google/generativeai/types/MessageOptions
+ - title: MessagePromptDict
+ path: /api/python/google/generativeai/types/MessagePromptDict
+ - title: MessagePromptOptions
+ path: /api/python/google/generativeai/types/MessagePromptOptions
+ - title: MessagesOptions
+ path: /api/python/google/generativeai/types/MessagesOptions
+ - title: Model
+ path: /api/python/google/generativeai/types/Model
+ - title: ModelsIterable
+ path: /api/python/google/generativeai/types/ModelsIterable
+ - title: PartDict
+ path: /api/python/google/generativeai/types/PartDict
+ - title: PartType
+ path: /api/python/google/generativeai/types/PartType
+ - title: Permission
+ path: /api/python/google/generativeai/types/Permission
+ - title: Permissions
+ path: /api/python/google/generativeai/types/Permissions
+ - title: RequestOptions
+ path: /api/python/google/generativeai/types/RequestOptions
+ - title: RequestOptionsType
+ path: /api/python/google/generativeai/types/RequestOptionsType
+ - title: ResponseDict
+ path: /api/python/google/generativeai/types/ResponseDict
+ - title: SafetyFeedbackDict
+ path: /api/python/google/generativeai/types/SafetyFeedbackDict
+ - title: SafetyRatingDict
+ path: /api/python/google/generativeai/types/SafetyRatingDict
+ - title: SafetySettingDict
+ path: /api/python/google/generativeai/types/SafetySettingDict
+ - title: Status
+ path: /api/python/google/generativeai/types/Status
+ - title: StopCandidateException
+ path: /api/python/google/generativeai/types/StopCandidateException
+ - title: StrictContentType
+ path: /api/python/google/generativeai/types/StrictContentType
+ - title: Tool
+ path: /api/python/google/generativeai/types/Tool
+ - title: ToolDict
+ path: /api/python/google/generativeai/types/ToolDict
+ - title: ToolsType
+ path: /api/python/google/generativeai/types/ToolsType
+ - title: TunedModel
+ path: /api/python/google/generativeai/types/TunedModel
+ - title: TunedModelNameOptions
+ path: /api/python/google/generativeai/types/TunedModelNameOptions
+ - title: TunedModelState
+ path: /api/python/google/generativeai/types/TunedModelState
+ - title: TypedDict
+ path: /api/python/google/generativeai/types/TypedDict
+ - title: get_default_file_client
+ path: /api/python/google/generativeai/types/get_default_file_client
+ - title: to_file_data
+ path: /api/python/google/generativeai/types/to_file_data
diff --git a/docs/api/google/generativeai/all_symbols.md b/docs/api/google/generativeai/all_symbols.md
new file mode 100644
index 000000000..a6fa84caf
--- /dev/null
+++ b/docs/api/google/generativeai/all_symbols.md
@@ -0,0 +1,261 @@
+# All symbols in Generative AI - Python
+
+
+
+## Primary symbols
+* google.generativeai
+* google.generativeai.ChatSession
+* google.generativeai.GenerationConfig
+* google.generativeai.GenerativeModel
+* google.generativeai.chat
+* google.generativeai.chat_async
+* google.generativeai.configure
+* google.generativeai.count_message_tokens
+* google.generativeai.count_text_tokens
+* google.generativeai.create_tuned_model
+* google.generativeai.delete_file
+* google.generativeai.delete_tuned_model
+* google.generativeai.embed_content
+* google.generativeai.embed_content_async
+* google.generativeai.generate_embeddings
+* google.generativeai.generate_text
+* google.generativeai.get_base_model
+* google.generativeai.get_file
+* google.generativeai.get_model
+* google.generativeai.get_operation
+* google.generativeai.get_tuned_model
+* google.generativeai.list_files
+* google.generativeai.list_models
+* google.generativeai.list_operations
+* google.generativeai.list_tuned_models
+* google.generativeai.protos
+* google.generativeai.protos.AttributionSourceId
+* google.generativeai.protos.AttributionSourceId.GroundingPassageId
+* google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
+* google.generativeai.protos.BatchCreateChunksRequest
+* google.generativeai.protos.BatchCreateChunksResponse
+* google.generativeai.protos.BatchDeleteChunksRequest
+* google.generativeai.protos.BatchEmbedContentsRequest
+* google.generativeai.protos.BatchEmbedContentsResponse
+* google.generativeai.protos.BatchEmbedTextRequest
+* google.generativeai.protos.BatchEmbedTextResponse
+* google.generativeai.protos.BatchUpdateChunksRequest
+* google.generativeai.protos.BatchUpdateChunksResponse
+* google.generativeai.protos.Blob
+* google.generativeai.protos.CachedContent
+* google.generativeai.protos.CachedContent.UsageMetadata
+* google.generativeai.protos.Candidate
+* google.generativeai.protos.Candidate.FinishReason
+* google.generativeai.protos.Chunk
+* google.generativeai.protos.Chunk.State
+* google.generativeai.protos.ChunkData
+* google.generativeai.protos.CitationMetadata
+* google.generativeai.protos.CitationSource
+* google.generativeai.protos.CodeExecution
+* google.generativeai.protos.CodeExecutionResult
+* google.generativeai.protos.CodeExecutionResult.Outcome
+* google.generativeai.protos.Condition
+* google.generativeai.protos.Condition.Operator
+* google.generativeai.protos.Content
+* google.generativeai.protos.ContentEmbedding
+* google.generativeai.protos.ContentFilter
+* google.generativeai.protos.ContentFilter.BlockedReason
+* google.generativeai.protos.Corpus
+* google.generativeai.protos.CountMessageTokensRequest
+* google.generativeai.protos.CountMessageTokensResponse
+* google.generativeai.protos.CountTextTokensRequest
+* google.generativeai.protos.CountTextTokensResponse
+* google.generativeai.protos.CountTokensRequest
+* google.generativeai.protos.CountTokensResponse
+* google.generativeai.protos.CreateCachedContentRequest
+* google.generativeai.protos.CreateChunkRequest
+* google.generativeai.protos.CreateCorpusRequest
+* google.generativeai.protos.CreateDocumentRequest
+* google.generativeai.protos.CreateFileRequest
+* google.generativeai.protos.CreateFileResponse
+* google.generativeai.protos.CreatePermissionRequest
+* google.generativeai.protos.CreateTunedModelMetadata
+* google.generativeai.protos.CreateTunedModelRequest
+* google.generativeai.protos.CustomMetadata
+* google.generativeai.protos.Dataset
+* google.generativeai.protos.DeleteCachedContentRequest
+* google.generativeai.protos.DeleteChunkRequest
+* google.generativeai.protos.DeleteCorpusRequest
+* google.generativeai.protos.DeleteDocumentRequest
+* google.generativeai.protos.DeleteFileRequest
+* google.generativeai.protos.DeletePermissionRequest
+* google.generativeai.protos.DeleteTunedModelRequest
+* google.generativeai.protos.Document
+* google.generativeai.protos.EmbedContentRequest
+* google.generativeai.protos.EmbedContentResponse
+* google.generativeai.protos.EmbedTextRequest
+* google.generativeai.protos.EmbedTextResponse
+* google.generativeai.protos.Embedding
+* google.generativeai.protos.Example
+* google.generativeai.protos.ExecutableCode
+* google.generativeai.protos.ExecutableCode.Language
+* google.generativeai.protos.File
+* google.generativeai.protos.File.State
+* google.generativeai.protos.FileData
+* google.generativeai.protos.FunctionCall
+* google.generativeai.protos.FunctionCallingConfig
+* google.generativeai.protos.FunctionCallingConfig.Mode
+* google.generativeai.protos.FunctionDeclaration
+* google.generativeai.protos.FunctionResponse
+* google.generativeai.protos.GenerateAnswerRequest
+* google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
+* google.generativeai.protos.GenerateAnswerResponse
+* google.generativeai.protos.GenerateAnswerResponse.InputFeedback
+* google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
+* google.generativeai.protos.GenerateContentRequest
+* google.generativeai.protos.GenerateContentResponse
+* google.generativeai.protos.GenerateContentResponse.PromptFeedback
+* google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
+* google.generativeai.protos.GenerateContentResponse.UsageMetadata
+* google.generativeai.protos.GenerateMessageRequest
+* google.generativeai.protos.GenerateMessageResponse
+* google.generativeai.protos.GenerateTextRequest
+* google.generativeai.protos.GenerateTextResponse
+* google.generativeai.protos.GenerationConfig
+* google.generativeai.protos.GetCachedContentRequest
+* google.generativeai.protos.GetChunkRequest
+* google.generativeai.protos.GetCorpusRequest
+* google.generativeai.protos.GetDocumentRequest
+* google.generativeai.protos.GetFileRequest
+* google.generativeai.protos.GetModelRequest
+* google.generativeai.protos.GetPermissionRequest
+* google.generativeai.protos.GetTunedModelRequest
+* google.generativeai.protos.GroundingAttribution
+* google.generativeai.protos.GroundingPassage
+* google.generativeai.protos.GroundingPassages
+* google.generativeai.protos.HarmCategory
+* google.generativeai.protos.Hyperparameters
+* google.generativeai.protos.ListCachedContentsRequest
+* google.generativeai.protos.ListCachedContentsResponse
+* google.generativeai.protos.ListChunksRequest
+* google.generativeai.protos.ListChunksResponse
+* google.generativeai.protos.ListCorporaRequest
+* google.generativeai.protos.ListCorporaResponse
+* google.generativeai.protos.ListDocumentsRequest
+* google.generativeai.protos.ListDocumentsResponse
+* google.generativeai.protos.ListFilesRequest
+* google.generativeai.protos.ListFilesResponse
+* google.generativeai.protos.ListModelsRequest
+* google.generativeai.protos.ListModelsResponse
+* google.generativeai.protos.ListPermissionsRequest
+* google.generativeai.protos.ListPermissionsResponse
+* google.generativeai.protos.ListTunedModelsRequest
+* google.generativeai.protos.ListTunedModelsResponse
+* google.generativeai.protos.Message
+* google.generativeai.protos.MessagePrompt
+* google.generativeai.protos.MetadataFilter
+* google.generativeai.protos.Model
+* google.generativeai.protos.Part
+* google.generativeai.protos.Permission
+* google.generativeai.protos.Permission.GranteeType
+* google.generativeai.protos.Permission.Role
+* google.generativeai.protos.QueryCorpusRequest
+* google.generativeai.protos.QueryCorpusResponse
+* google.generativeai.protos.QueryDocumentRequest
+* google.generativeai.protos.QueryDocumentResponse
+* google.generativeai.protos.RelevantChunk
+* google.generativeai.protos.SafetyFeedback
+* google.generativeai.protos.SafetyRating
+* google.generativeai.protos.SafetyRating.HarmProbability
+* google.generativeai.protos.SafetySetting
+* google.generativeai.protos.SafetySetting.HarmBlockThreshold
+* google.generativeai.protos.Schema
+* google.generativeai.protos.Schema.PropertiesEntry
+* google.generativeai.protos.SemanticRetrieverConfig
+* google.generativeai.protos.StringList
+* google.generativeai.protos.TaskType
+* google.generativeai.protos.TextCompletion
+* google.generativeai.protos.TextPrompt
+* google.generativeai.protos.Tool
+* google.generativeai.protos.ToolConfig
+* google.generativeai.protos.TransferOwnershipRequest
+* google.generativeai.protos.TransferOwnershipResponse
+* google.generativeai.protos.TunedModel
+* google.generativeai.protos.TunedModel.State
+* google.generativeai.protos.TunedModelSource
+* google.generativeai.protos.TuningExample
+* google.generativeai.protos.TuningExamples
+* google.generativeai.protos.TuningSnapshot
+* google.generativeai.protos.TuningTask
+* google.generativeai.protos.Type
+* google.generativeai.protos.UpdateCachedContentRequest
+* google.generativeai.protos.UpdateChunkRequest
+* google.generativeai.protos.UpdateCorpusRequest
+* google.generativeai.protos.UpdateDocumentRequest
+* google.generativeai.protos.UpdatePermissionRequest
+* google.generativeai.protos.UpdateTunedModelRequest
+* google.generativeai.protos.VideoMetadata
+* google.generativeai.types
+* google.generativeai.types.AnyModelNameOptions
+* google.generativeai.types.AsyncGenerateContentResponse
+* google.generativeai.types.AuthorError
+* google.generativeai.types.BaseModelNameOptions
+* google.generativeai.types.BlobDict
+* google.generativeai.types.BlobType
+* google.generativeai.types.BlockedPromptException
+* google.generativeai.types.BlockedReason
+* google.generativeai.types.BrokenResponseError
+* google.generativeai.types.CallableFunctionDeclaration
+* google.generativeai.types.ChatResponse
+* google.generativeai.types.CitationMetadataDict
+* google.generativeai.types.CitationSourceDict
+* google.generativeai.types.Completion
+* google.generativeai.types.ContentDict
+* google.generativeai.types.ContentFilterDict
+* google.generativeai.types.ContentType
+* google.generativeai.types.ContentsType
+* google.generativeai.types.ExampleDict
+* google.generativeai.types.ExampleOptions
+* google.generativeai.types.ExamplesOptions
+* google.generativeai.types.File
+* google.generativeai.types.FileDataDict
+* google.generativeai.types.FileDataType
+* google.generativeai.types.FunctionDeclaration
+* google.generativeai.types.FunctionDeclarationType
+* google.generativeai.types.FunctionLibrary
+* google.generativeai.types.FunctionLibraryType
+* google.generativeai.types.GenerateContentResponse
+* google.generativeai.types.GenerationConfig
+* google.generativeai.types.GenerationConfigDict
+* google.generativeai.types.GenerationConfigType
+* google.generativeai.types.HarmBlockThreshold
+* google.generativeai.types.HarmCategory
+* google.generativeai.types.HarmProbability
+* google.generativeai.types.IncompleteIterationError
+* google.generativeai.types.MessageDict
+* google.generativeai.types.MessageOptions
+* google.generativeai.types.MessagePromptDict
+* google.generativeai.types.MessagePromptOptions
+* google.generativeai.types.MessagesOptions
+* google.generativeai.types.Model
+* google.generativeai.types.ModelNameOptions
+* google.generativeai.types.ModelsIterable
+* google.generativeai.types.PartDict
+* google.generativeai.types.PartType
+* google.generativeai.types.Permission
+* google.generativeai.types.Permissions
+* google.generativeai.types.RequestOptions
+* google.generativeai.types.RequestOptionsType
+* google.generativeai.types.ResponseDict
+* google.generativeai.types.SafetyFeedbackDict
+* google.generativeai.types.SafetyRatingDict
+* google.generativeai.types.SafetySettingDict
+* google.generativeai.types.Status
+* google.generativeai.types.StopCandidateException
+* google.generativeai.types.StrictContentType
+* google.generativeai.types.Tool
+* google.generativeai.types.ToolDict
+* google.generativeai.types.ToolsType
+* google.generativeai.types.TunedModel
+* google.generativeai.types.TunedModelNameOptions
+* google.generativeai.types.TunedModelState
+* google.generativeai.types.TypedDict
+* google.generativeai.types.get_default_file_client
+* google.generativeai.types.to_file_data
+* google.generativeai.update_tuned_model
+* google.generativeai.upload_file
\ No newline at end of file
diff --git a/docs/api/google/generativeai/api_report.pb b/docs/api/google/generativeai/api_report.pb
new file mode 100644
index 0000000000000000000000000000000000000000..96c5f00568db0f4db860421c667f7d315faa1484
GIT binary patch
literal 49595
zcmc&-&66BQm9JK?8;&9fPw)`|T0Um^G#_rpV|KL|mMN(v*$(XRpcw-}5ZkKhu9<16
zyQ@`QEoo$1G~QkCvak!mg|EW|7YNQAIL?XS$dLnw;li0efy|d#)mc@UFF!gpejb
zMIWF)eT@7aJx%}d;gI~RbNbtlUilRLdAURWvmkhm{tW+15L|he1mWT|{J7-TB5*fL
z?v19yjn2h?fIkzkHZFq%fW`m(?F&yB;5`gKqr;mg(_w!?w2qGwg8F2k!a3D-$#mVN
z_##q8_i|4%2p
zWFo*}mQMPEcyOEy`-60vC(~Tqei7Y%d<6pO_V4Id;X9r1b0)HI7D!}*fFE$9fFDlT
zR)}PNu-uXR!JD1%ITt}HD|2q>5Go;-ov>>n#5|9)ygwjhC@u(a-*RWgeK#+Q+(-C8
zSP+#6x5D?zBmk@nEAH94OcsCZGGGDs1VPRJ1uPZU&cY9>u((-$Bc|a8q=@PCaCEfD
zk_`dyOxWK`N^xK0^hzU65Y*^Z5l&{|Q*uDcDxA{`cDVonh3_G`Q4IphB>b2fF;DK7
zWX9l@Y|A>s-IvG9L6*dM($5!DLLL*Kaly1Rx_frn?)f97j^7HVUGA9e)PX!jAQ2-W
z$cLjbT}>LIxl7z^a+IE*Zpqc-AXW7!R>aJ?;es^}|5>(j$>cyFV8Mza?`<{#t5ysY
zF*prBp#?3Sk!r~I=av*u+@A`-6FFc>@SyH+G(DP&&bSnEok$98z&8p5m(bKZL{O3B
z*V#dd9BQpyFC8*iAvFRJVrD
zi)u0Va8PR`cM%Gh&Y(;ng#q*WfM;p0x-I0%DTS)KeVLDo8aglBfjlOTNAnywje3PZ
zYLacKC$&3r<^&8_w~FhDY6aAq8_2?`Lj7&IH370*xteZJzcum-gKc?F9s&VOSUdk(
zzAjJSW?7o2^X}z5&qfD}oD{En=^`5>FAhm<^CV;wP)a~q11S~Q`Q1WLG$~#GzY|`T
zIlUUb`+P>q8%nmjF_f2Ti6o1m-)KMXgx4%3+Dj(!G#?FaB*a+B-6ZQBFQ&IucDmX0
zZoO3B?u5@GQeBDj!Eq1r7r>t1NbW3>d9IRy)ufg5S1$u8H>7nBUn_|W^I1BbCk-KZ
za>Mw>QV0`6T!nP%@fzY9&47BW0@baCB$nw{<3Bs$zvy;EO$dBBPMa+ey%Z1S9SLzu
zfDrp6O;PPho_{)Rhi`??7#V@#w_pI1{rKu^ND7O`yC17E;D&HoiHSTw7dv5`9yEA>
zr#lhdc##mGO#;VVd6CNDde8yl?$^MLSQQ6EeAO~w{Z}WHcEV)9-B)S*^W`Lu$@<3)
zF*pqdF7Y4>KZ@-8UQ7Zc-E$30UI{^Dj3Tl9q4#y%*GkOp_0e=RKfaN~bLeapv?rhPj%W}-)&iUFZEO=5H{8D!e!*UjgAr+e
z(avmD03iQr*Gs2rd~lK{
zMQ2=$K7Y0(gNO3|Ysm%N3#N%R=Kul*UcYXTz0!?e`sNQ_qTv?xvY_65``03qNT9%{mK>T!p6!
z5e2r*RmOOFNSgcC?k5B2YY<%8oZ|VwlA`Te@*>_t>4|7JlKEn+tjkX+Ve({vAIaHqD7&tr?N7NZT`CPs-IrSVDR|Bd&zBwM3c8;B>~
zn0*a%_@w$Z%;A;$N%LgKad$krV33mZkur0G7_dZ7_o-oh+qmb_Sqi5~rn%}K
z{d^`rPAv);KBlBNR@tNp$)79}5>3j-5TP+k4^j#r4+s)WB=XLbQ?;gP%rWTeqj8>;
z+*0BOLUZUXOO;dkF>7h$Y_^y;6=i@d-?0SL9EbF1tLJ4l0l5#Oc&h$4*BThrHuKb+
z0?35Gd%sH{dP`GiI)l^_v;p6q8AvZkIucs0eocZFY@iFQR04Dx@Tl&z4Ri&F0IpD-
zZ$hW#*KI;}E-AL;Rj$B^he=_1GN@UA)#0cLoSWd3r8;)Kam@lS@RV|p3Z1L&gpQxW
zv2)?8>0mJ-aXFlB8T#X+TZZ2j
z-D+H&f$JG>oy9j2LC-eph;TNgtKNadbHCwdI^Q
z(8H1{Iv!`85*)mq)lL|3XJ_Z!mT4uP+X&DGYTH^JKP>`m@AW$#v8%Co^(OLD8&$
zoo#!njE8P^P7@!D6WT0S0soMuEx)sLDGiZSk-IO&)1w8w&d7CV$?9sP1+oX*K=$2E
z*h3nE3K809ixOjCBPA+Hd>)pNRQBH;jN!FK>SzX^jZ#N5cx{*B6Hcnl;IU_lj}y}D
zrtrg{=vwlzzBB5WwoE&hkKglvPT^V@Y{7fdE6_IF9iM&>uzfl>K3`G;^>*rEtg25Z
zHdfWE+h*ux4#B`4Jr&P|DW-Iyh38}u3!`@3wuSsYUg52a>42V__Tn+Uh%ddY@@twe
z*pdWZ_gxoY$)QRQ?&qrTfuwAc1x?{E6`)JIK7|zZlm&Xtwo-hr6JAH6v|5TbXpEQy
zJIF_b@yTnN=?aY$-iXfK7(SWL-56e}@J)Qy09g}{+=v(Lg$!4d!8nH5bmgqaP0CmA
zwHyz|tL6foOr*)LX>o=Qt`KP3b4i>k`D-c3<>~xhlBxO?=fY{QZC93%6jseh1?Vzy
z_VSZea#?50rAxT_Q73#EVN9(sZ4jr1#q~1z>q#;^hzH6$!as3IPk-78zidG>
z%pz5Jx>v^O;PwswSGP_(;Ws_X)&?UI^S$a*M4trtvrhO`-?Dk*OW%g`NuRzA=ao%=
z(h2u{uIeKvS5o8B7uHk>T)^(x!l8gIxspCCHqlj(YbgQF@8>ja`<`X`ta+P4AwqYT
zJV7_pGW-^0c-zwZ=Vpmy%_udATI2X%cEbNK*v{tCwZZblgkSS3q)(pzMJN2af9bq2
zy?^5kJYER)Z#=JTZy1U!+`}W^{~vx`O2u`W2M&a>DqE^E%kKGnwJHFe2_1x%0#GRd
z9)M110J3eO;N7EQO|Yj`HdqASbSWqBk&h}mt~?|Fwi^I%UU7KZw
zKXRvn27a@pR(zzb{=S~%))afM`P5~%<43nI#Mz`r-VZrSvy(z)hO(No0$BS{iru>u
zVcRG=@E0yB=sv57yJ!jBfb~`;Sg7kW|F=-rYhsU&dQ{BpF=NNW#tSFp{`QOFy6}i*7D6V3B1v6G3YPUp
z(gOVW!HG%nY)Qn%72kwTl;!BG0XRt$(=1lyE$dhvuZuwQ0vuNz0{6M)GkAE`MA3^h
zwUuSZPu6SjxcW{jkLN8EpC-ym)%m(dE
z*cRKGM{DI0v{8iOS2`4O_$)^Wkz+G?hy>m$QzskvtWn);;I&-EAFWla!XqnO1^gSS
zdJgm4C6h2-eTv+M3*JXRpiooY>$_to=*I$N$liR;PHCUcW~a1QC%cD!uA^>mkKTnB
z=3iMP*@@-=N9YXM#yY$(uYfBuRDOKqRH@*FIiMGnxvDF{GmTRhcEJnt8hi`t^6AfH+DAOlaA_u|7OKhe#*Li%c(r{hNn4EP3l+gS3z9WP5DiaceR
zQQ~%HuzN`^#SNHqfx0ux>HvS>ZjXHObkh9B1RueJeErN4kPLfcA6H?BbhvDmWcetW
zUz_G|YEypVn15{+_80khB3B-aZp$nrZ^Fg_n_pm(PX(MZ`!8uCZ(A+l<;x!V^wac+
zHX|CW`)$rFy_GSMJC2+gQQk1c}G%0A!
zEW;j7e-y4K=UlF1&t`s@WUt(tl0(kp(M*-s@zCRxR%-aLDMFds37-{+
zTrx_H$;&tW8WBf)3B&g0B!0fL>M|=n+Q2RC6nXH>ia?VHt9gLvl-k+WeKoux;z~}^
z*)YtO%l0+_-kZj=`Ee>qD}-FqHl5P$EMjTO(2bqN6DVLwTTl&QA&plj?!d<~Xxh|9
z4l_Z&cmk+C^Oq?)CtAXJ%UulA^O?68sOL3j!9N=V=Jc4aXl7XWCmT&3^5m?$bcr%}
z$4K~Yq$aom_1ba*-ZA3f=)f^o-3c9U*)Vin%V-^ZO;yVTSTzgVSO6mtR#CqV5%948
z)zL6Xi$m8&JmSfv)9RClN~E9i%t>e&55-_r)V}4jt#@Q!VV9ibYKkG!84bFFaxJCGzig!mF~o2d465#d{)t
z#CcQkj*EdfM3QuTxBjikrd{-(*dm9x(ht`d2tne-a5SiW^b3$09#YvujxfpY0K7o3qB@BwZ6L4^qs#W=T+6(lln5E@Bogz;waR#@O
zAuFG+-5(?~Uc3w1IeDF;d0auo-$8IHaTFiHk-)Z*oGJ#hW#~SWRK;Mn3f5b)YN4*r
zq}4)QugR;4Be5;r^_avex|w`&Gy2Js;)N**GdcOLDsN_(^w~f9TyXKeRr7VNB@NRN
zjoT5cp=ZF4mi;&KGLQ7>Uab@?Y#;LH0*fypGfBW6->kIe%%b*NiVM7k(#*0nFTRYu
zZUTV^OJsTIwiE^7pXB7nNa7QbW;RiLu9%zTI3^EQRPwpidDWIMRrhOQHI_U25UuEY
zH&MG;G8>;%vo@zT(p~whpAtlAGU9AsMe)XQYu}aS*+TU-PQKNgLaAaJyo%1clyYq5
zaZbG@#l!3zTYbo|6nb{Hw~}-Dc}g9VI#Dy>bY5dL;pAnJr$kWEc`g)$3XDzNO4pMY
z;?U#2Y02fNmb~TM)Y2(sZaoRK_zxo2Q
zN1N!aWHh8RCB3t&v;mVsT=8`?nTJ?|NRnUrPGA(>_NoP58XaUY4BpV1n5)b(TE(YM
zmOEG8C0XB#59Bj8QaInLD(3VVwq+X*lBhpgrfuX)9MYGhQ3=?V^W1=2LlV!uw>QGi
zOZG_@H;KQl8#(=Vz?ViD2-PkPH^nvUiU}I8(6DwJoqJ=U+cC%NYK>nadkQ;fPEBvJ
z!~3B7$7GF<)A6uq$J408J{-*Q+rXvYsQT;)Gj$WZjRu+GiyTT3e0IqcALmA~y|v10
z75CXMvsK({(@b$kKxuWtE(a%hGUsJGZy>>j|Emyj0)vAVv^^m{8=-PDr}{%{?c)8F
zHv9{1QHm;iz5Igqfg7mae9Bx|pWbAytXDr$d}L?kN9DHi=s}A4sTJsMmL9|hqwy$L
zJ|_hI0^8Ae#T_~+a2tN4xLzd3_Zg%V!;orVZ{B33vQLMyQrW9dp$AEnw}kCY>oH$}
z3B`SJI$-_H^@_pv$V*UG&{{u-@w>F&~`B!lE(>E#)eFyIsjQUwyN)TBEM(HxS;{>WN#I;Vi3Fz
zU5vfMp~P~BkdwN#s(0Xr*h$t(-+6bCB{h35VRI|H3@dv_)Dr@SK4%lZVsL(fCn06C
zt@fIHKD=S@Zq2KgJ0856v{*iC;$Cx#UaDO(ix(MQmrtxW4KSv6DeJt(`9*GigZVkM
zCKH#QqL&tcVKcVrB*^*=PrfLH9A${GIloTSPm3!}Didrrdu-JmsB(FHJOdZT&?07n
zkOLkh!0}1rS76|e^3Up8ahXLxs4GGa{q6f%3xK6u`)u>9q9a7i|yUi8QBPwoU
zvh80(BVc=+^8e{aB=!jMg^LBFL0}^cq)FAK1<3-s}<9bSVw_Z>5fVP}$MJW+5-`q49x0_z`uP80Y;%GCJ2Ej7Q|_cN+|P-C<>Pe2&-?
z5J|X+Ii9)zud1rzegz(DarATgxIlkGz8ya<@R(`eYQ~u=UZgPXo$y1lbzIEGX*>k&
W2vAvSp3*x1ltD^D8-dgpdw&OMgDV06
literal 0
HcmV?d00001
diff --git a/docs/api/google/generativeai/chat.md b/docs/api/google/generativeai/chat.md
new file mode 100644
index 000000000..1dc51c8f6
--- /dev/null
+++ b/docs/api/google/generativeai/chat.md
@@ -0,0 +1,198 @@
+description: Calls the API to initiate a chat with a model using provided parameters
+
+
+
+
+
+
+# google.generativeai.chat
+
+
+
+
+
+
+
+Calls the API to initiate a chat with a model using provided parameters
+
+
+
+google.generativeai.chat(
+ *,
+ model: (model_types.AnyModelNameOptions | None) = 'models/chat-bison-001',
+ context: (str | None) = None,
+ examples: (discuss_types.ExamplesOptions | None) = None,
+ messages: (discuss_types.MessagesOptions | None) = None,
+ temperature: (float | None) = None,
+ candidate_count: (int | None) = None,
+ top_p: (float | None) = None,
+ top_k: (float | None) = None,
+ prompt: (discuss_types.MessagePromptOptions | None) = None,
+ client: (glm.DiscussServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> discuss_types.ChatResponse
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`model`
+ |
+
+Which model to call, as a string or a types.Model .
+ |
+
+
+`context`
+ |
+
+Text that should be provided to the model first, to ground the response.
+
+If not empty, this `context` will be given to the model first before the
+`examples` and `messages`.
+
+This field can be a description of your prompt to the model to help provide
+context and guide the responses.
+
+Examples:
+
+* "Translate the phrase from English to French."
+* "Given a statement, classify the sentiment as happy, sad or neutral."
+
+Anything included in this field will take precedence over history in `messages`
+if the total input size exceeds the model's Model.input_token_limit .
+ |
+
+
+`examples`
+ |
+
+Examples of what the model should generate.
+
+This includes both the user input and the response that the model should
+emulate.
+
+These `examples` are treated identically to conversation messages except
+that they take precedence over the history in `messages`:
+If the total input size exceeds the model's `input_token_limit` the input
+will be truncated. Items will be dropped from `messages` before `examples`.
+ |
+
+
+`messages`
+ |
+
+A snapshot of the conversation history sorted chronologically.
+
+Turns alternate between two authors.
+
+If the total input size exceeds the model's `input_token_limit` the input
+will be truncated: The oldest items will be dropped from `messages`.
+ |
+
+
+`temperature`
+ |
+
+Controls the randomness of the output. Must be positive.
+
+Typical values are in the range: `[0.0,1.0]`. Higher values produce a
+more random and varied response. A temperature of zero will be deterministic.
+ |
+
+
+`candidate_count`
+ |
+
+The **maximum** number of generated response messages to return.
+
+This value must be between `[1, 8]`, inclusive. If unset, this
+will default to `1`.
+
+Note: Only unique candidates are returned. Higher temperatures are more
+likely to produce unique candidates. Setting `temperature=0.0` will always
+return 1 candidate regardless of the `candidate_count`.
+ |
+
+
+`top_k`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
+top-k sampling.
+
+`top_k` sets the maximum number of tokens to sample from on each step.
+ |
+
+
+`top_p`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
+top-k sampling.
+
+`top_p` configures the nucleus sampling. It sets the maximum cumulative
+ probability of tokens to sample from.
+
+ For example, if the sorted probabilities are
+ `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
+ as `[0.625, 0.25, 0.125, 0, 0, 0]`.
+
+ Typical values are in the `[0.9, 1.0]` range.
+ |
+
+
+`prompt`
+ |
+
+You may pass a types.MessagePromptOptions **instead** of
+setting `context`/`examples`/`messages`, but not both.
+ |
+
+
+`client`
+ |
+
+If you're not relying on the default client, you pass a
+`glm.DiscussServiceClient` instead.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/chat_async.md b/docs/api/google/generativeai/chat_async.md
new file mode 100644
index 000000000..614456c1f
--- /dev/null
+++ b/docs/api/google/generativeai/chat_async.md
@@ -0,0 +1,198 @@
+description: Calls the API to initiate a chat with a model using provided parameters
+
+
+
+
+
+
+# google.generativeai.chat_async
+
+
+
+
+
+
+
+Calls the API to initiate a chat with a model using provided parameters
+
+
+
+google.generativeai.chat_async(
+ *,
+ model='models/chat-bison-001',
+ context=None,
+ examples=None,
+ messages=None,
+ temperature=None,
+ candidate_count=None,
+ top_p=None,
+ top_k=None,
+ prompt=None,
+ client=None,
+ request_options=None
+)
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`model`
+ |
+
+Which model to call, as a string or a types.Model .
+ |
+
+
+`context`
+ |
+
+Text that should be provided to the model first, to ground the response.
+
+If not empty, this `context` will be given to the model first before the
+`examples` and `messages`.
+
+This field can be a description of your prompt to the model to help provide
+context and guide the responses.
+
+Examples:
+
+* "Translate the phrase from English to French."
+* "Given a statement, classify the sentiment as happy, sad or neutral."
+
+Anything included in this field will take precedence over history in `messages`
+if the total input size exceeds the model's Model.input_token_limit .
+ |
+
+
+`examples`
+ |
+
+Examples of what the model should generate.
+
+This includes both the user input and the response that the model should
+emulate.
+
+These `examples` are treated identically to conversation messages except
+that they take precedence over the history in `messages`:
+If the total input size exceeds the model's `input_token_limit` the input
+will be truncated. Items will be dropped from `messages` before `examples`.
+ |
+
+
+`messages`
+ |
+
+A snapshot of the conversation history sorted chronologically.
+
+Turns alternate between two authors.
+
+If the total input size exceeds the model's `input_token_limit` the input
+will be truncated: The oldest items will be dropped from `messages`.
+ |
+
+
+`temperature`
+ |
+
+Controls the randomness of the output. Must be positive.
+
+Typical values are in the range: `[0.0,1.0]`. Higher values produce a
+more random and varied response. A temperature of zero will be deterministic.
+ |
+
+
+`candidate_count`
+ |
+
+The **maximum** number of generated response messages to return.
+
+This value must be between `[1, 8]`, inclusive. If unset, this
+will default to `1`.
+
+Note: Only unique candidates are returned. Higher temperatures are more
+likely to produce unique candidates. Setting `temperature=0.0` will always
+return 1 candidate regardless of the `candidate_count`.
+ |
+
+
+`top_k`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
+top-k sampling.
+
+`top_k` sets the maximum number of tokens to sample from on each step.
+ |
+
+
+`top_p`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
+top-k sampling.
+
+`top_p` configures the nucleus sampling. It sets the maximum cumulative
+ probability of tokens to sample from.
+
+ For example, if the sorted probabilities are
+ `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
+ as `[0.625, 0.25, 0.125, 0, 0, 0]`.
+
+ Typical values are in the `[0.9, 1.0]` range.
+ |
+
+
+`prompt`
+ |
+
+You may pass a types.MessagePromptOptions **instead** of
+setting `context`/`examples`/`messages`, but not both.
+ |
+
+
+`client`
+ |
+
+If you're not relying on the default client, you pass a
+`glm.DiscussServiceClient` instead.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/configure.md b/docs/api/google/generativeai/configure.md
new file mode 100644
index 000000000..f0b5f4006
--- /dev/null
+++ b/docs/api/google/generativeai/configure.md
@@ -0,0 +1,80 @@
+description: Captures default client configuration.
+
+
+
+
+
+
+# google.generativeai.configure
+
+
+
+
+
+
+
+Captures default client configuration.
+
+
+
+google.generativeai.configure(
+ *,
+ api_key: (str | None) = None,
+ credentials: (ga_credentials.Credentials | dict | None) = None,
+ transport: (str | None) = None,
+ client_options: (client_options_lib.ClientOptions | dict | None) = None,
+ client_info: (gapic_v1.client_info.ClientInfo | None) = None,
+ default_metadata: Sequence[tuple[str, str]] = ()
+)
+
+
+
+
+
+
+If no API key has been provided (either directly, or on `client_options`) and the
+`GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
+
+Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in
+`google.ai.generativelanguage` for details on the other arguments.
+
+
+
+
+Args |
+
+
+
+`transport`
+ |
+
+A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
+ |
+
+
+`api_key`
+ |
+
+The API-Key to use when creating the default clients (each service uses
+a separate client). This is a shortcut for `client_options={"api_key": api_key}`.
+If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be
+used.
+ |
+
+
+`default_metadata`
+ |
+
+Default (key, value) metadata pairs to send with every request.
+When using `transport="rest"`, these are sent as HTTP headers.
+ |
+
+
+
diff --git a/docs/api/google/generativeai/count_message_tokens.md b/docs/api/google/generativeai/count_message_tokens.md
new file mode 100644
index 000000000..7ec05db9b
--- /dev/null
+++ b/docs/api/google/generativeai/count_message_tokens.md
@@ -0,0 +1,41 @@
+description: Calls the API to calculate the number of tokens used in the prompt.
+
+
+
+
+
+
+# google.generativeai.count_message_tokens
+
+
+
+
+
+
+
+Calls the API to calculate the number of tokens used in the prompt.
+
+
+
+google.generativeai.count_message_tokens(
+ *,
+ prompt: discuss_types.MessagePromptOptions = None,
+ context: (str | None) = None,
+ examples: (discuss_types.ExamplesOptions | None) = None,
+ messages: (discuss_types.MessagesOptions | None) = None,
+ model: model_types.AnyModelNameOptions = DEFAULT_DISCUSS_MODEL,
+ client: (glm.DiscussServiceAsyncClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> discuss_types.TokenCount
+
+
+
+
+
diff --git a/docs/api/google/generativeai/count_text_tokens.md b/docs/api/google/generativeai/count_text_tokens.md
new file mode 100644
index 000000000..a15f0f2aa
--- /dev/null
+++ b/docs/api/google/generativeai/count_text_tokens.md
@@ -0,0 +1,37 @@
+description: Calls the API to count the number of tokens in the text prompt.
+
+
+
+
+
+
+# google.generativeai.count_text_tokens
+
+
+
+
+
+
+
+Calls the API to count the number of tokens in the text prompt.
+
+
+
+google.generativeai.count_text_tokens(
+ model: model_types.AnyModelNameOptions,
+ prompt: str,
+ client: (glm.TextServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> text_types.TokenCount
+
+
+
+
+
diff --git a/docs/api/google/generativeai/create_tuned_model.md b/docs/api/google/generativeai/create_tuned_model.md
new file mode 100644
index 000000000..c12179164
--- /dev/null
+++ b/docs/api/google/generativeai/create_tuned_model.md
@@ -0,0 +1,198 @@
+description: Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
+
+
+
+
+
+
+# google.generativeai.create_tuned_model
+
+
+
+
+
+
+
+Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
+
+
+
+google.generativeai.create_tuned_model(
+ source_model: model_types.AnyModelNameOptions,
+ training_data: model_types.TuningDataOptions,
+ *,
+ id: (str | None) = None,
+ display_name: (str | None) = None,
+ description: (str | None) = None,
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None,
+ epoch_count: (int | None) = None,
+ batch_size: (int | None) = None,
+ learning_rate: (float | None) = None,
+ input_key: str = 'text_input',
+ output_key: str = 'output',
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> operations.CreateTunedModelOperation
+
+
+
+
+
+
+Since tuning a model can take significant time, this API doesn't wait for the tuning to complete.
+Instead, it returns a `google.api_core.operation.Operation` object that lets you check on the
+status of the tuning job, or wait for it to complete, and check the result.
+
+After the job completes you can either find the resulting `TunedModel` object in
+`Operation.result()` or `palm.list_tuned_models` or `palm.get_tuned_model(model_id)`.
+
+```
+my_id = "my-tuned-model-id"
+operation = palm.create_tuned_model(
+ id = my_id,
+ source_model="models/text-bison-001",
+ training_data=[{'text_input': 'example input', 'output': 'example output'},...]
+)
+tuned_model=operation.result() # Wait for tuning to finish
+
+palm.generate_text(f"tunedModels/{my_id}", prompt="...")
+```
+
+
+
+
+Args |
+
+
+
+`source_model`
+ |
+
+The name of the model to tune.
+ |
+
+
+`training_data`
+ |
+
+The dataset to tune the model on. This must be either:
+* A protos.Dataset , or
+* An `Iterable` of:
+  * protos.TuningExample,
+ * `{'text_input': text_input, 'output': output}` dicts
+ * `(text_input, output)` tuples.
+* A `Mapping` of `Iterable[str]` - use `input_key` and `output_key` to choose which
+ columns to use as the input/output
+* A csv file (will be read with `pd.read_csv` and handled as a `Mapping`
+ above). This can be:
+ * A local path as a `str` or `pathlib.Path`.
+ * A url for a csv file.
+ * The url of a Google Sheets file.
+* A JSON file - Its contents will be handled either as an `Iterable` or `Mapping`
+ above. This can be:
+ * A local path as a `str` or `pathlib.Path`.
+ |
+
+
+`id`
+ |
+
+The model identifier, used to refer to the model in the API
+`tunedModels/{id}`. Must be unique.
+ |
+
+
+`display_name`
+ |
+
+A human-readable name for display.
+ |
+
+
+`description`
+ |
+
+A description of the tuned model.
+ |
+
+
+`temperature`
+ |
+
+The default temperature for the tuned model, see types.Model for details.
+ |
+
+
+`top_p`
+ |
+
+The default `top_p` for the model, see types.Model for details.
+ |
+
+
+`top_k`
+ |
+
+The default `top_k` for the model, see types.Model for details.
+ |
+
+
+`epoch_count`
+ |
+
+The number of tuning epochs to run. An epoch is a pass over the whole dataset.
+ |
+
+
+`batch_size`
+ |
+
+The number of examples to use in each training batch.
+ |
+
+
+`learning_rate`
+ |
+
+The step size multiplier for the gradient updates.
+ |
+
+
+`client`
+ |
+
+Which client to use.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+A [`google.api_core.operation.Operation`](https://googleapis.dev/python/google-api-core/latest/operation.html)
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/delete_file.md b/docs/api/google/generativeai/delete_file.md
new file mode 100644
index 000000000..1098c3afb
--- /dev/null
+++ b/docs/api/google/generativeai/delete_file.md
@@ -0,0 +1,34 @@
+description: Calls the API to permanently delete a specified file using a supported file service.
+
+
+
+
+
+
+# google.generativeai.delete_file
+
+
+
+
+
+
+
+Calls the API to permanently delete a specified file using a supported file service.
+
+
+
+google.generativeai.delete_file(
+ name: (str | file_types.File | protos.File)
+)
+
+
+
+
+
diff --git a/docs/api/google/generativeai/delete_tuned_model.md b/docs/api/google/generativeai/delete_tuned_model.md
new file mode 100644
index 000000000..d5c4fa089
--- /dev/null
+++ b/docs/api/google/generativeai/delete_tuned_model.md
@@ -0,0 +1,36 @@
+description: Calls the API to delete a specified tuned model
+
+
+
+
+
+
+# google.generativeai.delete_tuned_model
+
+
+
+
+
+
+
+Calls the API to delete a specified tuned model
+
+
+
+google.generativeai.delete_tuned_model(
+ tuned_model: model_types.TunedModelNameOptions,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> None
+
+
+
+
+
diff --git a/docs/api/google/generativeai/embed_content.md b/docs/api/google/generativeai/embed_content.md
new file mode 100644
index 000000000..e6cb45c6f
--- /dev/null
+++ b/docs/api/google/generativeai/embed_content.md
@@ -0,0 +1,112 @@
+description: Calls the API to create embeddings for content passed in.
+
+
+
+
+
+
+# google.generativeai.embed_content
+
+
+
+
+
+
+
+Calls the API to create embeddings for content passed in.
+
+
+
+google.generativeai.embed_content(
+ model: model_types.BaseModelNameOptions,
+ content: (content_types.ContentType | Iterable[content_types.ContentType]),
+ task_type: (EmbeddingTaskTypeOptions | None) = None,
+ title: (str | None) = None,
+ output_dimensionality: (int | None) = None,
+ client: glm.GenerativeServiceClient = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> (text_types.EmbeddingDict | text_types.BatchEmbeddingDict)
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`model`
+ |
+
+ Which [model](https://ai.google.dev/models/gemini#embedding) to
+call, as a string or a types.Model .
+ |
+
+
+`content`
+ |
+
+ Content to embed.
+ |
+
+
+`task_type`
+ |
+
+ Optional task type for which the embeddings will be used. Can only
+be set for `models/embedding-001`.
+ |
+
+
+`title`
+ |
+
+ An optional title for the text. Only applicable when task_type is
+`RETRIEVAL_DOCUMENT`.
+ |
+
+
+`output_dimensionality`
+ |
+
+ Optional reduced dimensionality for the output embeddings. If set,
+excessive values from the output embeddings will be truncated from
+the end.
+ |
+
+
+`request_options`
+ |
+
+ Options for the request.
+ |
+
+
+
+
+
+
+
+
+Return |
+
+
+Dictionary containing the embedding (list of float values) for the
+input content.
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/embed_content_async.md b/docs/api/google/generativeai/embed_content_async.md
new file mode 100644
index 000000000..bbf132fd1
--- /dev/null
+++ b/docs/api/google/generativeai/embed_content_async.md
@@ -0,0 +1,40 @@
+description: Calls the API to create async embeddings for content passed in.
+
+
+
+
+
+
+# google.generativeai.embed_content_async
+
+
+
+
+
+
+
+Calls the API to create async embeddings for content passed in.
+
+
+
+google.generativeai.embed_content_async(
+ model,
+ content,
+ task_type=None,
+ title=None,
+ output_dimensionality=None,
+ client=None,
+ request_options=None
+)
+
+
+
+
+
diff --git a/docs/api/google/generativeai/generate_embeddings.md b/docs/api/google/generativeai/generate_embeddings.md
new file mode 100644
index 000000000..9d1fd8beb
--- /dev/null
+++ b/docs/api/google/generativeai/generate_embeddings.md
@@ -0,0 +1,90 @@
+description: Calls the API to create an embedding for the text passed in.
+
+
+
+
+
+
+# google.generativeai.generate_embeddings
+
+
+
+
+
+
+
+Calls the API to create an embedding for the text passed in.
+
+
+
+google.generativeai.generate_embeddings(
+ model: model_types.BaseModelNameOptions,
+ text: (str | Sequence[str]),
+ client: glm.TextServiceClient = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> (text_types.EmbeddingDict | text_types.BatchEmbeddingDict)
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`model`
+ |
+
+Which model to call, as a string or a types.Model .
+ |
+
+
+`text`
+ |
+
+Free-form input text given to the model. Given a string, the model will
+generate an embedding based on the input text.
+ |
+
+
+`client`
+ |
+
+If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+Dictionary containing the embedding (list of float values) for the input text.
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/generate_text.md b/docs/api/google/generativeai/generate_text.md
new file mode 100644
index 000000000..91225fd70
--- /dev/null
+++ b/docs/api/google/generativeai/generate_text.md
@@ -0,0 +1,172 @@
+description: Calls the API to generate text based on the provided prompt.
+
+
+
+
+
+
+# google.generativeai.generate_text
+
+
+
+
+
+
+
+Calls the API to generate text based on the provided prompt.
+
+
+
+google.generativeai.generate_text(
+ *,
+ model: model_types.AnyModelNameOptions = DEFAULT_TEXT_MODEL,
+ prompt: str,
+ temperature: (float | None) = None,
+ candidate_count: (int | None) = None,
+ max_output_tokens: (int | None) = None,
+ top_p: (float | None) = None,
+ top_k: (float | None) = None,
+ safety_settings: (palm_safety_types.SafetySettingOptions | None) = None,
+ stop_sequences: (str | Iterable[str] | None) = None,
+ client: (glm.TextServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> text_types.Completion
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`model`
+ |
+
+Which model to call, as a string or a types.Model .
+ |
+
+
+`prompt`
+ |
+
+Free-form input text given to the model. Given a prompt, the model will
+generate text that completes the input text.
+ |
+
+
+`temperature`
+ |
+
+Controls the randomness of the output. Must be positive.
+Typical values are in the range: `[0.0,1.0]`. Higher values produce a
+more random and varied response. A temperature of zero will be deterministic.
+ |
+
+
+`candidate_count`
+ |
+
+The **maximum** number of generated response messages to return.
+This value must be between `[1, 8]`, inclusive. If unset, this
+will default to `1`.
+
+Note: Only unique candidates are returned. Higher temperatures are more
+likely to produce unique candidates. Setting `temperature=0.0` will always
+return 1 candidate regardless of the `candidate_count`.
+ |
+
+
+`max_output_tokens`
+ |
+
+Maximum number of tokens to include in a candidate. Must be greater
+than zero. If unset, will default to 64.
+ |
+
+
+`top_k`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
+`top_k` sets the maximum number of tokens to sample from on each step.
+ |
+
+
+`top_p`
+ |
+
+The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
+`top_p` configures the nucleus sampling. It sets the maximum cumulative
+probability of tokens to sample from.
+For example, if the sorted probabilities are
+`[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
+as `[0.625, 0.25, 0.125, 0, 0, 0]`.
+ |
+
+
+`safety_settings`
+ |
+
+A list of unique `types.SafetySetting` instances for blocking unsafe content.
+These will be enforced on the `prompt` and
+`candidates`. There should not be more than one
+setting for each `types.SafetyCategory` type. The API will block any prompts and
+responses that fail to meet the thresholds set by these settings. This list
+overrides the default settings for each `SafetyCategory` specified in the
+safety_settings. If there is no `types.SafetySetting` for a given
+`SafetyCategory` provided in the list, the API will use the default safety
+setting for that category.
+ |
+
+
+`stop_sequences`
+ |
+
+A set of up to 5 character sequences that will stop output generation.
+If specified, the API will stop at the first appearance of a stop
+sequence. The stop sequence will not be included as part of the response.
+ |
+
+
+`client`
+ |
+
+If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+A types.Completion containing the model's text completion response.
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/get_base_model.md b/docs/api/google/generativeai/get_base_model.md
new file mode 100644
index 000000000..a71ef12f5
--- /dev/null
+++ b/docs/api/google/generativeai/get_base_model.md
@@ -0,0 +1,87 @@
+description: Calls the API to fetch a base model by name.
+
+
+
+
+
+
+# google.generativeai.get_base_model
+
+
+
+
+
+
+
+Calls the API to fetch a base model by name.
+
+
+
+google.generativeai.get_base_model(
+ name: model_types.BaseModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.Model
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_base_model('models/chat-bison-001')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the model to fetch. Should start with `models/`
+ |
+
+
+`client`
+ |
+
+The client to use.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_file.md b/docs/api/google/generativeai/get_file.md
new file mode 100644
index 000000000..5377161be
--- /dev/null
+++ b/docs/api/google/generativeai/get_file.md
@@ -0,0 +1,34 @@
+description: Calls the API to retrieve a specified file using a supported file service.
+
+
+
+
+
+
+# google.generativeai.get_file
+
+
+
+
+
+
+
+Calls the API to retrieve a specified file using a supported file service.
+
+
+
+google.generativeai.get_file(
+ name: str
+) -> file_types.File
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md
new file mode 100644
index 000000000..e488dbfaa
--- /dev/null
+++ b/docs/api/google/generativeai/get_model.md
@@ -0,0 +1,87 @@
+description: Calls the API to fetch a model by name.
+
+
+
+
+
+
+# google.generativeai.get_model
+
+
+
+
+
+
+
+Calls the API to fetch a model by name.
+
+
+
+google.generativeai.get_model(
+ name: model_types.AnyModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> (model_types.Model | model_types.TunedModel)
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_model('models/gemini-pro')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the model to fetch. Should start with `models/`
+ |
+
+
+`client`
+ |
+
+The client to use.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_operation.md b/docs/api/google/generativeai/get_operation.md
new file mode 100644
index 000000000..74bb706e6
--- /dev/null
+++ b/docs/api/google/generativeai/get_operation.md
@@ -0,0 +1,34 @@
+description: Calls the API to get a specific operation
+
+
+
+
+
+
+# google.generativeai.get_operation
+
+
+
+
+
+
+
+Calls the API to get a specific operation
+
+
+
+google.generativeai.get_operation(
+ name: str, *, client=None
+) -> CreateTunedModelOperation
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_tuned_model.md b/docs/api/google/generativeai/get_tuned_model.md
new file mode 100644
index 000000000..f420a3beb
--- /dev/null
+++ b/docs/api/google/generativeai/get_tuned_model.md
@@ -0,0 +1,87 @@
+description: Calls the API to fetch a tuned model by name.
+
+
+
+
+
+
+# google.generativeai.get_tuned_model
+
+
+
+
+
+
+
+Calls the API to fetch a tuned model by name.
+
+
+
+google.generativeai.get_tuned_model(
+ name: model_types.TunedModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModel
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the model to fetch. Should start with `tunedModels/`
+ |
+
+
+`client`
+ |
+
+The client to use.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_files.md b/docs/api/google/generativeai/list_files.md
new file mode 100644
index 000000000..ccb3cb453
--- /dev/null
+++ b/docs/api/google/generativeai/list_files.md
@@ -0,0 +1,34 @@
+description: Calls the API to list files using a supported file service.
+
+
+
+
+
+
+# google.generativeai.list_files
+
+
+
+
+
+
+
+Calls the API to list files using a supported file service.
+
+
+
+google.generativeai.list_files(
+ page_size=100
+) -> Iterable[file_types.File]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_models.md b/docs/api/google/generativeai/list_models.md
new file mode 100644
index 000000000..5d19f917b
--- /dev/null
+++ b/docs/api/google/generativeai/list_models.md
@@ -0,0 +1,87 @@
+description: Calls the API to list all available models.
+
+
+
+
+
+
+# google.generativeai.list_models
+
+
+
+
+
+
+
+Calls the API to list all available models.
+
+
+
+google.generativeai.list_models(
+ *,
+ page_size: (int | None) = 50,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.ModelsIterable
+
+
+
+
+
+
+```
+import pprint
+for model in genai.list_models():
+ pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+`page_size`
+ |
+
+How many `types.Models` to fetch per page (api call).
+ |
+
+
+`client`
+ |
+
+You may pass a `glm.ModelServiceClient` instead of using the default client.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_operations.md b/docs/api/google/generativeai/list_operations.md
new file mode 100644
index 000000000..bfcd6f641
--- /dev/null
+++ b/docs/api/google/generativeai/list_operations.md
@@ -0,0 +1,34 @@
+description: Calls the API to list all operations
+
+
+
+
+
+
+# google.generativeai.list_operations
+
+
+
+
+
+
+
+Calls the API to list all operations
+
+
+
+google.generativeai.list_operations(
+ *, client=None
+) -> Iterator[CreateTunedModelOperation]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_tuned_models.md b/docs/api/google/generativeai/list_tuned_models.md
new file mode 100644
index 000000000..07306d3c7
--- /dev/null
+++ b/docs/api/google/generativeai/list_tuned_models.md
@@ -0,0 +1,87 @@
+description: Calls the API to list all tuned models.
+
+
+
+
+
+
+# google.generativeai.list_tuned_models
+
+
+
+
+
+
+
+Calls the API to list all tuned models.
+
+
+
+google.generativeai.list_tuned_models(
+ *,
+ page_size: (int | None) = 50,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModelsIterable
+
+
+
+
+
+
+```
+import pprint
+for model in genai.list_tuned_models():
+ pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+`page_size`
+ |
+
+How many `types.Models` to fetch per page (api call).
+ |
+
+
+`client`
+ |
+
+You may pass a `glm.ModelServiceClient` instead of using the default client.
+ |
+
+
+`request_options`
+ |
+
+Options for the request.
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos.md b/docs/api/google/generativeai/protos.md
new file mode 100644
index 000000000..8ab8c8efc
--- /dev/null
+++ b/docs/api/google/generativeai/protos.md
@@ -0,0 +1,368 @@
+description: This module provides low level access to the ProtoBuffer "Message" classes used by the API.
+
+
+
+
+
+
+# Module: google.generativeai.protos
+
+
+
+
+
+
+
+This module provides low level access to the ProtoBuffer "Message" classes used by the API.
+
+
+**For typical usage of this SDK you do not need to use any of these classes.**
+
+ProtoBuffers are Google API's serialization format. They are strongly typed and efficient.
+
+The `genai` SDK tries to be permissive about what objects it will accept from a user, but in the end
+the SDK always converts input to an appropriate Proto Message object to send as the request. Each API request
+has a `*Request` and `*Response` Message defined here.
+
+If you have any uncertainty about what the API may accept or return, these classes provide the
+complete/unambiguous answer. They come from the `google-ai-generativelanguage` package which is
+generated from a snapshot of the API definition.
+
+```
+>>> from google.generativeai import protos
+>>> import inspect
+>>> print(inspect.getsource(protos.Part))
+```
+
+Proto classes can have "oneof" fields. Use `in` to check which `oneof` field is set.
+
+```
+>>> p = protos.Part(text='hello')
+>>> 'text' in p
+True
+>>> p.inline_data = {'mime_type':'image/png', 'data': b'PNG'}
+>>> type(p.inline_data) is protos.Blob
+True
+>>> 'inline_data' in p
+True
+>>> 'text' in p
+False
+```
+
+Instances of all Message classes can be converted into JSON compatible dictionaries with the following construct
+(Bytes are base64 encoded):
+
+```
+>>> p_dict = type(p).to_dict(p)
+>>> p_dict
+{'inline_data': {'mime_type': 'image/png', 'data': 'UE5H'}}
+```
+
+A compatible dict can be converted to an instance of a Message class by passing it as the first argument to the
+constructor:
+
+```
+>>> p = protos.Part(p_dict)
+inline_data {
+ mime_type: "image/png"
+ data: "PNG"
+}
+```
+
+Note when converting that `to_dict` accepts additional arguments:
+
+- `use_integers_for_enums:bool = True`, Set it to `False` to replace enum int values with their string
+ names in the output
+- `including_default_value_fields:bool = True`, Set it to `False` to reduce the verbosity of the output.
+
+Additional arguments are described in the docstring:
+
+```
+>>> help(protos.Part.to_dict)
+```
+
+## Classes
+
+[`class AttributionSourceId`](../../google/generativeai/protos/AttributionSourceId.md): Identifier for the source contributing to this attribution.
+
+[`class BatchCreateChunksRequest`](../../google/generativeai/protos/BatchCreateChunksRequest.md): Request to batch create ``Chunk``\ s.
+
+[`class BatchCreateChunksResponse`](../../google/generativeai/protos/BatchCreateChunksResponse.md): Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
+
+[`class BatchDeleteChunksRequest`](../../google/generativeai/protos/BatchDeleteChunksRequest.md): Request to batch delete ``Chunk``\ s.
+
+[`class BatchEmbedContentsRequest`](../../google/generativeai/protos/BatchEmbedContentsRequest.md): Batch request to get embeddings from the model for a list of prompts.
+
+[`class BatchEmbedContentsResponse`](../../google/generativeai/protos/BatchEmbedContentsResponse.md): The response to a ``BatchEmbedContentsRequest``.
+
+[`class BatchEmbedTextRequest`](../../google/generativeai/protos/BatchEmbedTextRequest.md): Batch request to get a text embedding from the model.
+
+[`class BatchEmbedTextResponse`](../../google/generativeai/protos/BatchEmbedTextResponse.md): The response to a EmbedTextRequest.
+
+[`class BatchUpdateChunksRequest`](../../google/generativeai/protos/BatchUpdateChunksRequest.md): Request to batch update ``Chunk``\ s.
+
+[`class BatchUpdateChunksResponse`](../../google/generativeai/protos/BatchUpdateChunksResponse.md): Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
+
+[`class Blob`](../../google/generativeai/protos/Blob.md): Raw media bytes.
+
+[`class CachedContent`](../../google/generativeai/protos/CachedContent.md): Content that has been preprocessed and can be used in subsequent request to GenerativeService.
+
+[`class Candidate`](../../google/generativeai/protos/Candidate.md): A response candidate generated from the model.
+
+[`class Chunk`](../../google/generativeai/protos/Chunk.md): A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
+
+[`class ChunkData`](../../google/generativeai/protos/ChunkData.md): Extracted data that represents the ``Chunk`` content.
+
+[`class CitationMetadata`](../../google/generativeai/protos/CitationMetadata.md): A collection of source attributions for a piece of content.
+
+[`class CitationSource`](../../google/generativeai/protos/CitationSource.md): A citation to a source for a portion of a specific response.
+
+[`class CodeExecution`](../../google/generativeai/protos/CodeExecution.md): Tool that executes code generated by the model, and automatically returns the result to the model.
+
+[`class CodeExecutionResult`](../../google/generativeai/protos/CodeExecutionResult.md): Result of executing the ``ExecutableCode``.
+
+[`class Condition`](../../google/generativeai/protos/Condition.md): Filter condition applicable to a single key.
+
+[`class Content`](../../google/generativeai/protos/Content.md): The base structured datatype containing multi-part content of a message.
+
+[`class ContentEmbedding`](../../google/generativeai/protos/ContentEmbedding.md): A list of floats representing an embedding.
+
+[`class ContentFilter`](../../google/generativeai/protos/ContentFilter.md): Content filtering metadata associated with processing a single request.
+
+[`class Corpus`](../../google/generativeai/protos/Corpus.md): A ``Corpus`` is a collection of ``Document``\ s.
+
+[`class CountMessageTokensRequest`](../../google/generativeai/protos/CountMessageTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountMessageTokensResponse`](../../google/generativeai/protos/CountMessageTokensResponse.md): A response from ``CountMessageTokens``.
+
+[`class CountTextTokensRequest`](../../google/generativeai/protos/CountTextTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountTextTokensResponse`](../../google/generativeai/protos/CountTextTokensResponse.md): A response from ``CountTextTokens``.
+
+[`class CountTokensRequest`](../../google/generativeai/protos/CountTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountTokensResponse`](../../google/generativeai/protos/CountTokensResponse.md): A response from ``CountTokens``.
+
+[`class CreateCachedContentRequest`](../../google/generativeai/protos/CreateCachedContentRequest.md): Request to create CachedContent.
+
+[`class CreateChunkRequest`](../../google/generativeai/protos/CreateChunkRequest.md): Request to create a ``Chunk``.
+
+[`class CreateCorpusRequest`](../../google/generativeai/protos/CreateCorpusRequest.md): Request to create a ``Corpus``.
+
+[`class CreateDocumentRequest`](../../google/generativeai/protos/CreateDocumentRequest.md): Request to create a ``Document``.
+
+[`class CreateFileRequest`](../../google/generativeai/protos/CreateFileRequest.md): Request for ``CreateFile``.
+
+[`class CreateFileResponse`](../../google/generativeai/protos/CreateFileResponse.md): Response for ``CreateFile``.
+
+[`class CreatePermissionRequest`](../../google/generativeai/protos/CreatePermissionRequest.md): Request to create a ``Permission``.
+
+[`class CreateTunedModelMetadata`](../../google/generativeai/protos/CreateTunedModelMetadata.md): Metadata about the state and progress of creating a tuned model returned from the long-running operation
+
+[`class CreateTunedModelRequest`](../../google/generativeai/protos/CreateTunedModelRequest.md): Request to create a TunedModel.
+
+[`class CustomMetadata`](../../google/generativeai/protos/CustomMetadata.md): User provided metadata stored as key-value pairs.
+
+[`class Dataset`](../../google/generativeai/protos/Dataset.md): Dataset for training or validation.
+
+[`class DeleteCachedContentRequest`](../../google/generativeai/protos/DeleteCachedContentRequest.md): Request to delete CachedContent.
+
+[`class DeleteChunkRequest`](../../google/generativeai/protos/DeleteChunkRequest.md): Request to delete a ``Chunk``.
+
+[`class DeleteCorpusRequest`](../../google/generativeai/protos/DeleteCorpusRequest.md): Request to delete a ``Corpus``.
+
+[`class DeleteDocumentRequest`](../../google/generativeai/protos/DeleteDocumentRequest.md): Request to delete a ``Document``.
+
+[`class DeleteFileRequest`](../../google/generativeai/protos/DeleteFileRequest.md): Request for ``DeleteFile``.
+
+[`class DeletePermissionRequest`](../../google/generativeai/protos/DeletePermissionRequest.md): Request to delete the ``Permission``.
+
+[`class DeleteTunedModelRequest`](../../google/generativeai/protos/DeleteTunedModelRequest.md): Request to delete a TunedModel.
+
+[`class Document`](../../google/generativeai/protos/Document.md): A ``Document`` is a collection of ``Chunk``\ s.
+
+[`class EmbedContentRequest`](../../google/generativeai/protos/EmbedContentRequest.md): Request containing the ``Content`` for the model to embed.
+
+[`class EmbedContentResponse`](../../google/generativeai/protos/EmbedContentResponse.md): The response to an ``EmbedContentRequest``.
+
+[`class EmbedTextRequest`](../../google/generativeai/protos/EmbedTextRequest.md): Request to get a text embedding from the model.
+
+[`class EmbedTextResponse`](../../google/generativeai/protos/EmbedTextResponse.md): The response to a EmbedTextRequest.
+
+[`class Embedding`](../../google/generativeai/protos/Embedding.md): A list of floats representing the embedding.
+
+[`class Example`](../../google/generativeai/protos/Example.md): An input/output example used to instruct the Model.
+
+[`class ExecutableCode`](../../google/generativeai/protos/ExecutableCode.md): Code generated by the model that is meant to be executed, and the result returned to the model.
+
+[`class File`](../../google/generativeai/protos/File.md): A file uploaded to the API.
+
+[`class FileData`](../../google/generativeai/protos/FileData.md): URI based data.
+
+[`class FunctionCall`](../../google/generativeai/protos/FunctionCall.md): A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name
with the arguments and their values.
+
+[`class FunctionCallingConfig`](../../google/generativeai/protos/FunctionCallingConfig.md): Configuration for specifying function calling behavior.
+
+[`class FunctionDeclaration`](../../google/generativeai/protos/FunctionDeclaration.md): Structured representation of a function declaration as defined by the `OpenAPI 3.0.3 specification `__.
+
+[`class FunctionResponse`](../../google/generativeai/protos/FunctionResponse.md): The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name
and a structured JSON object containing any output from the function is used as context to the model.
+
+[`class GenerateAnswerRequest`](../../google/generativeai/protos/GenerateAnswerRequest.md): Request to generate a grounded answer from the model.
+
+[`class GenerateAnswerResponse`](../../google/generativeai/protos/GenerateAnswerResponse.md): Response from the model for a grounded answer.
+
+[`class GenerateContentRequest`](../../google/generativeai/protos/GenerateContentRequest.md): Request to generate a completion from the model.
+
+[`class GenerateContentResponse`](../../google/generativeai/protos/GenerateContentResponse.md): Response from the model supporting multiple candidates.
+
+[`class GenerateMessageRequest`](../../google/generativeai/protos/GenerateMessageRequest.md): Request to generate a message response from the model.
+
+[`class GenerateMessageResponse`](../../google/generativeai/protos/GenerateMessageResponse.md): The response from the model.
+
+[`class GenerateTextRequest`](../../google/generativeai/protos/GenerateTextRequest.md): Request to generate a text completion response from the model.
+
+[`class GenerateTextResponse`](../../google/generativeai/protos/GenerateTextResponse.md): The response from the model, including candidate completions.
+
+[`class GenerationConfig`](../../google/generativeai/protos/GenerationConfig.md): Configuration options for model generation and outputs.
+
+[`class GetCachedContentRequest`](../../google/generativeai/protos/GetCachedContentRequest.md): Request to read CachedContent.
+
+[`class GetChunkRequest`](../../google/generativeai/protos/GetChunkRequest.md): Request for getting information about a specific ``Chunk``.
+
+[`class GetCorpusRequest`](../../google/generativeai/protos/GetCorpusRequest.md): Request for getting information about a specific ``Corpus``.
+
+[`class GetDocumentRequest`](../../google/generativeai/protos/GetDocumentRequest.md): Request for getting information about a specific ``Document``.
+
+[`class GetFileRequest`](../../google/generativeai/protos/GetFileRequest.md): Request for ``GetFile``.
+
+[`class GetModelRequest`](../../google/generativeai/protos/GetModelRequest.md): Request for getting information about a specific Model.
+
+[`class GetPermissionRequest`](../../google/generativeai/protos/GetPermissionRequest.md): Request for getting information about a specific ``Permission``.
+
+[`class GetTunedModelRequest`](../../google/generativeai/protos/GetTunedModelRequest.md): Request for getting information about a specific Model.
+
+[`class GroundingAttribution`](../../google/generativeai/protos/GroundingAttribution.md): Attribution for a source that contributed to an answer.
+
+[`class GroundingPassage`](../../google/generativeai/protos/GroundingPassage.md): Passage included inline with a grounding configuration.
+
+[`class GroundingPassages`](../../google/generativeai/protos/GroundingPassages.md): A repeated list of passages.
+
+[`class HarmCategory`](../../google/generativeai/protos/HarmCategory.md): The category of a rating.
+
+[`class Hyperparameters`](../../google/generativeai/protos/Hyperparameters.md): Hyperparameters controlling the tuning process.
+
+[`class ListCachedContentsRequest`](../../google/generativeai/protos/ListCachedContentsRequest.md): Request to list CachedContents.
+
+[`class ListCachedContentsResponse`](../../google/generativeai/protos/ListCachedContentsResponse.md): Response with CachedContents list.
+
+[`class ListChunksRequest`](../../google/generativeai/protos/ListChunksRequest.md): Request for listing ``Chunk``\ s.
+
+[`class ListChunksResponse`](../../google/generativeai/protos/ListChunksResponse.md): Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
+
+[`class ListCorporaRequest`](../../google/generativeai/protos/ListCorporaRequest.md): Request for listing ``Corpora``.
+
+[`class ListCorporaResponse`](../../google/generativeai/protos/ListCorporaResponse.md): Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
+
+[`class ListDocumentsRequest`](../../google/generativeai/protos/ListDocumentsRequest.md): Request for listing ``Document``\ s.
+
+[`class ListDocumentsResponse`](../../google/generativeai/protos/ListDocumentsResponse.md): Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
+
+[`class ListFilesRequest`](../../google/generativeai/protos/ListFilesRequest.md): Request for ``ListFiles``.
+
+[`class ListFilesResponse`](../../google/generativeai/protos/ListFilesResponse.md): Response for ``ListFiles``.
+
+[`class ListModelsRequest`](../../google/generativeai/protos/ListModelsRequest.md): Request for listing all Models.
+
+[`class ListModelsResponse`](../../google/generativeai/protos/ListModelsResponse.md): Response from ``ListModel`` containing a paginated list of Models.
+
+[`class ListPermissionsRequest`](../../google/generativeai/protos/ListPermissionsRequest.md): Request for listing permissions.
+
+[`class ListPermissionsResponse`](../../google/generativeai/protos/ListPermissionsResponse.md): Response from ``ListPermissions`` containing a paginated list of permissions.
+
+[`class ListTunedModelsRequest`](../../google/generativeai/protos/ListTunedModelsRequest.md): Request for listing TunedModels.
+
+[`class ListTunedModelsResponse`](../../google/generativeai/protos/ListTunedModelsResponse.md): Response from ``ListTunedModels`` containing a paginated list of Models.
+
+[`class Message`](../../google/generativeai/protos/Message.md): The base unit of structured text.
+
+[`class MessagePrompt`](../../google/generativeai/protos/MessagePrompt.md): All of the structured input text passed to the model as a prompt.
+
+[`class MetadataFilter`](../../google/generativeai/protos/MetadataFilter.md): User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
+
+[`class Model`](../../google/generativeai/protos/Model.md): Information about a Generative Language Model.
+
+[`class Part`](../../google/generativeai/protos/Part.md): A datatype containing media that is part of a multi-part ``Content`` message.
+
+[`class Permission`](../../google/generativeai/protos/Permission.md): Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g.
+
+[`class QueryCorpusRequest`](../../google/generativeai/protos/QueryCorpusRequest.md): Request for querying a ``Corpus``.
+
+[`class QueryCorpusResponse`](../../google/generativeai/protos/QueryCorpusResponse.md): Response from ``QueryCorpus`` containing a list of relevant chunks.
+
+[`class QueryDocumentRequest`](../../google/generativeai/protos/QueryDocumentRequest.md): Request for querying a ``Document``.
+
+[`class QueryDocumentResponse`](../../google/generativeai/protos/QueryDocumentResponse.md): Response from ``QueryDocument`` containing a list of relevant chunks.
+
+[`class RelevantChunk`](../../google/generativeai/protos/RelevantChunk.md): The information for a chunk relevant to a query.
+
+[`class SafetyFeedback`](../../google/generativeai/protos/SafetyFeedback.md): Safety feedback for an entire request.
+
+[`class SafetyRating`](../../google/generativeai/protos/SafetyRating.md): Safety rating for a piece of content.
+
+[`class SafetySetting`](../../google/generativeai/protos/SafetySetting.md): Safety setting, affecting the safety-blocking behavior.
+
+[`class Schema`](../../google/generativeai/protos/Schema.md): The ``Schema`` object allows the definition of input and output data types.
+
+[`class SemanticRetrieverConfig`](../../google/generativeai/protos/SemanticRetrieverConfig.md): Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
+
+[`class StringList`](../../google/generativeai/protos/StringList.md): User provided string values assigned to a single metadata key.
+
+[`class TaskType`](../../google/generativeai/protos/TaskType.md): Type of task for which the embedding will be used.
+
+[`class TextCompletion`](../../google/generativeai/protos/TextCompletion.md): Output text returned from a model.
+
+[`class TextPrompt`](../../google/generativeai/protos/TextPrompt.md): Text given to the model as a prompt.
+
+[`class Tool`](../../google/generativeai/protos/Tool.md): Tool details that the model may use to generate response.
+
+[`class ToolConfig`](../../google/generativeai/protos/ToolConfig.md): The Tool configuration containing parameters for specifying ``Tool`` use in the request.
+
+[`class TransferOwnershipRequest`](../../google/generativeai/protos/TransferOwnershipRequest.md): Request to transfer the ownership of the tuned model.
+
+[`class TransferOwnershipResponse`](../../google/generativeai/protos/TransferOwnershipResponse.md): Response from ``TransferOwnership``.
+
+[`class TunedModel`](../../google/generativeai/protos/TunedModel.md): A fine-tuned model created using ModelService.CreateTunedModel.
+
+[`class TunedModelSource`](../../google/generativeai/protos/TunedModelSource.md): Tuned model as a source for training a new model.
+
+[`class TuningExample`](../../google/generativeai/protos/TuningExample.md): A single example for tuning.
+
+[`class TuningExamples`](../../google/generativeai/protos/TuningExamples.md): A set of tuning examples. Can be training or validation data.
+
+[`class TuningSnapshot`](../../google/generativeai/protos/TuningSnapshot.md): Record for a single tuning step.
+
+[`class TuningTask`](../../google/generativeai/protos/TuningTask.md): Tuning tasks that create tuned models.
+
+[`class Type`](../../google/generativeai/protos/Type.md): Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
+
+[`class UpdateCachedContentRequest`](../../google/generativeai/protos/UpdateCachedContentRequest.md): Request to update CachedContent.
+
+[`class UpdateChunkRequest`](../../google/generativeai/protos/UpdateChunkRequest.md): Request to update a ``Chunk``.
+
+[`class UpdateCorpusRequest`](../../google/generativeai/protos/UpdateCorpusRequest.md): Request to update a ``Corpus``.
+
+[`class UpdateDocumentRequest`](../../google/generativeai/protos/UpdateDocumentRequest.md): Request to update a ``Document``.
+
+[`class UpdatePermissionRequest`](../../google/generativeai/protos/UpdatePermissionRequest.md): Request to update the ``Permission``.
+
+[`class UpdateTunedModelRequest`](../../google/generativeai/protos/UpdateTunedModelRequest.md): Request to update a TunedModel.
+
+[`class VideoMetadata`](../../google/generativeai/protos/VideoMetadata.md): Metadata for a video ``File``.
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId.md b/docs/api/google/generativeai/protos/AttributionSourceId.md
new file mode 100644
index 000000000..bdc426825
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId.md
@@ -0,0 +1,73 @@
+description: Identifier for the source contributing to this attribution.
+
+
+
+
+
+
+
+
+# google.generativeai.protos.AttributionSourceId
+
+
+
+
+
+
+
+Identifier for the source contributing to this attribution.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`grounding_passage`
+ |
+
+`google.ai.generativelanguage.AttributionSourceId.GroundingPassageId`
+
+Identifier for an inline passage.
+
+This field is a member of `oneof`_ ``source``.
+ |
+
+
+`semantic_retriever_chunk`
+ |
+
+`google.ai.generativelanguage.AttributionSourceId.SemanticRetrieverChunk`
+
+Identifier for a ``Chunk`` fetched via Semantic Retriever.
+
+This field is a member of `oneof`_ ``source``.
+ |
+
+
+
+
+
+## Child Classes
+[`class GroundingPassageId`](../../../google/generativeai/protos/AttributionSourceId/GroundingPassageId.md)
+
+[`class SemanticRetrieverChunk`](../../../google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md)
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
new file mode 100644
index 000000000..0e55174a0
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
@@ -0,0 +1,59 @@
+description: Identifier for a part within a GroundingPassage.
+
+
+
+
+
+
+# google.generativeai.protos.AttributionSourceId.GroundingPassageId
+
+
+
+
+
+
+
+Identifier for a part within a ``GroundingPassage``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`passage_id`
+ |
+
+`str`
+
+Output only. ID of the passage matching the
+``GenerateAnswerRequest``'s GroundingPassage.id .
+ |
+
+
+`part_index`
+ |
+
+`int`
+
+Output only. Index of the part within the
+``GenerateAnswerRequest``'s GroundingPassage.content .
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
new file mode 100644
index 000000000..dab874e2e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
@@ -0,0 +1,60 @@
+description: Identifier for a Chunk retrieved via Semantic Retriever specified in the GenerateAnswerRequest using SemanticRetrieverConfig.
+
+
+
+
+
+
+# google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
+
+
+
+
+
+
+
+Identifier for a ``Chunk`` retrieved via Semantic Retriever specified in the ``GenerateAnswerRequest`` using ``SemanticRetrieverConfig``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`source`
+ |
+
+`str`
+
+Output only. Name of the source matching the request's
+SemanticRetrieverConfig.source . Example: ``corpora/123``
+or ``corpora/123/documents/abc``
+ |
+
+
+`chunk`
+ |
+
+`str`
+
+Output only. Name of the ``Chunk`` containing the attributed
+text. Example: ``corpora/123/documents/abc/chunks/xyz``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
new file mode 100644
index 000000000..4508763a9
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
@@ -0,0 +1,62 @@
+description: Request to batch create Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.BatchCreateChunksRequest
+
+
+
+
+
+
+
+Request to batch create ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Optional. The name of the ``Document`` where this batch of
+``Chunk``\ s will be created. The parent field in every
+``CreateChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`requests`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CreateChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to create. A maximum of 100 ``Chunk``\ s can be created in a
+batch.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
new file mode 100644
index 000000000..c5c7315cf
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
@@ -0,0 +1,48 @@
+description: Response from BatchCreateChunks containing a list of created Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.BatchCreateChunksResponse
+
+
+
+
+
+
+
+Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`chunks`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+``Chunk``\ s created.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
new file mode 100644
index 000000000..da61fbb11
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
@@ -0,0 +1,61 @@
+description: Request to batch delete Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.BatchDeleteChunksRequest
+
+
+
+
+
+
+
+Request to batch delete ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Optional. The name of the ``Document`` containing the
+``Chunk``\ s to delete. The parent field in every
+``DeleteChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`requests`
+ |
+
+`MutableSequence[google.ai.generativelanguage.DeleteChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to delete.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
new file mode 100644
index 000000000..229a8899a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
@@ -0,0 +1,65 @@
+description: Batch request to get embeddings from the model for a list of prompts.
+
+
+
+
+
+
+# google.generativeai.protos.BatchEmbedContentsRequest
+
+
+
+
+
+
+
+Batch request to get embeddings from the model for a list of prompts.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+`requests`
+ |
+
+`MutableSequence[google.ai.generativelanguage.EmbedContentRequest]`
+
+Required. Embed requests for the batch. The model in each of
+these requests must match the model specified
+BatchEmbedContentsRequest.model .
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
new file mode 100644
index 000000000..a4fc7b328
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
@@ -0,0 +1,50 @@
+description: The response to a BatchEmbedContentsRequest.
+
+
+
+
+
+
+# google.generativeai.protos.BatchEmbedContentsResponse
+
+
+
+
+
+
+
+The response to a ``BatchEmbedContentsRequest``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`embeddings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.ContentEmbedding]`
+
+Output only. The embeddings for each request,
+in the same order as provided in the batch
+request.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
new file mode 100644
index 000000000..729ca1683
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
@@ -0,0 +1,71 @@
+description: Batch request to get a text embedding from the model.
+
+
+
+
+
+
+# google.generativeai.protos.BatchEmbedTextRequest
+
+
+
+
+
+
+
+Batch request to get a text embedding from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the embedding. Examples: models/embedding-gecko-001
+ |
+
+
+`texts`
+ |
+
+`MutableSequence[str]`
+
+Optional. The free-form input texts that the
+model will turn into an embedding. The current
+limit is 100 texts, over which an error will be
+thrown.
+ |
+
+
+`requests`
+ |
+
+`MutableSequence[google.ai.generativelanguage.EmbedTextRequest]`
+
+Optional. Embed requests for the batch. Only one of
+``texts`` or ``requests`` can be set.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
new file mode 100644
index 000000000..852411097
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
@@ -0,0 +1,49 @@
+description: The response to an EmbedTextRequest.
+
+
+
+
+
+
+# google.generativeai.protos.BatchEmbedTextResponse
+
+
+
+
+
+
+
+The response to an ``EmbedTextRequest``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`embeddings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Embedding]`
+
+Output only. The embeddings generated from
+the input text.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
new file mode 100644
index 000000000..0d1445da9
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
@@ -0,0 +1,62 @@
+description: Request to batch update Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.BatchUpdateChunksRequest
+
+
+
+
+
+
+
+Request to batch update ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Optional. The name of the ``Document`` containing the
+``Chunk``\ s to update. The parent field in every
+``UpdateChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`requests`
+ |
+
+`MutableSequence[google.ai.generativelanguage.UpdateChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to update. A maximum of 100 ``Chunk``\ s can be updated in a
+batch.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
new file mode 100644
index 000000000..258f21f7c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
@@ -0,0 +1,48 @@
+description: Response from BatchUpdateChunks containing a list of updated Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.BatchUpdateChunksResponse
+
+
+
+
+
+
+
+Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`chunks`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+``Chunk``\ s updated.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Blob.md b/docs/api/google/generativeai/protos/Blob.md
new file mode 100644
index 000000000..7985af92f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Blob.md
@@ -0,0 +1,64 @@
+description: Raw media bytes.
+
+
+
+
+
+
+# google.generativeai.protos.Blob
+
+
+
+
+
+
+
+Raw media bytes.
+
+
+
+Text should not be sent as raw bytes, use the 'text' field.
+
+
+
+
+
+
+Attributes |
+
+
+
+`mime_type`
+ |
+
+`str`
+
+The IANA standard MIME type of the source data. Examples:
+
+- image/png
+- image/jpeg If an unsupported MIME type is provided, an
+ error will be returned. For a complete list of supported
+ types, see `Supported file
+ formats `__.
+ |
+
+
+`data`
+ |
+
+`bytes`
+
+Raw bytes for media formats.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CachedContent.md b/docs/api/google/generativeai/protos/CachedContent.md
new file mode 100644
index 000000000..dd6d513b9
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CachedContent.md
@@ -0,0 +1,181 @@
+description: Content that has been preprocessed and can be used in subsequent requests to GenerativeService.
+
+
+
+
+
+
+
+# google.generativeai.protos.CachedContent
+
+
+
+
+
+
+
+Content that has been preprocessed and can be used in subsequent requests to GenerativeService.
+
+
+
+Cached content can be only used with model it was created for.
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`expire_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Timestamp in UTC of when this resource is considered
+expired. This is *always* provided on output, regardless of
+what was sent on input.
+
+This field is a member of `oneof`_ ``expiration``.
+ |
+
+
+`ttl`
+ |
+
+`google.protobuf.duration_pb2.Duration`
+
+Input only. New TTL for this resource, input
+only.
+
+This field is a member of `oneof`_ ``expiration``.
+ |
+
+
+`name`
+ |
+
+`str`
+
+Optional. Identifier. The resource name referring to the
+cached content. Format: ``cachedContents/{id}``
+
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+Optional. Immutable. The user-generated
+meaningful display name of the cached content.
+Maximum 128 Unicode characters.
+
+ |
+
+
+`model`
+ |
+
+`str`
+
+Required. Immutable. The name of the ``Model`` to use for
+cached content Format: ``models/{model}``
+
+ |
+
+
+`system_instruction`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Optional. Input only. Immutable. Developer
+set system instruction. Currently text only.
+
+ |
+
+
+`contents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Optional. Input only. Immutable. The content
+to cache.
+ |
+
+
+`tools`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Tool]`
+
+Optional. Input only. Immutable. A list of ``Tools`` the
+model may use to generate the next response
+ |
+
+
+`tool_config`
+ |
+
+`google.ai.generativelanguage.ToolConfig`
+
+Optional. Input only. Immutable. Tool config.
+This config is shared for all tools.
+
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. Creation time of the cache
+entry.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. When the cache entry was last
+updated in UTC time.
+ |
+
+
+`usage_metadata`
+ |
+
+`google.ai.generativelanguage.CachedContent.UsageMetadata`
+
+Output only. Metadata on the usage of the
+cached content.
+ |
+
+
+
+
+
+## Child Classes
+[`class UsageMetadata`](../../../google/generativeai/protos/CachedContent/UsageMetadata.md)
+
diff --git a/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
new file mode 100644
index 000000000..b2bab36fe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
@@ -0,0 +1,49 @@
+description: Metadata on the usage of the cached content.
+
+
+
+
+
+
+# google.generativeai.protos.CachedContent.UsageMetadata
+
+
+
+
+
+
+
+Metadata on the usage of the cached content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`total_token_count`
+ |
+
+`int`
+
+Total number of tokens that the cached
+content consumes.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Candidate.md b/docs/api/google/generativeai/protos/Candidate.md
new file mode 100644
index 000000000..ee20c608c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Candidate.md
@@ -0,0 +1,123 @@
+description: A response candidate generated from the model.
+
+
+
+
+
+
+
+# google.generativeai.protos.Candidate
+
+
+
+
+
+
+
+A response candidate generated from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`index`
+ |
+
+`int`
+
+Output only. Index of the candidate in the
+list of candidates.
+
+ |
+
+
+`content`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Output only. Generated content returned from
+the model.
+ |
+
+
+`finish_reason`
+ |
+
+`google.ai.generativelanguage.Candidate.FinishReason`
+
+Optional. Output only. The reason why the
+model stopped generating tokens.
+If empty, the model has not stopped generating
+the tokens.
+ |
+
+
+`safety_ratings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+List of ratings for the safety of a response
+candidate.
+There is at most one rating per category.
+ |
+
+
+`citation_metadata`
+ |
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+candidate.
+
+This field may be populated with recitation information for
+any text included in the ``content``. These are passages
+that are "recited" from copyrighted material in the
+foundational LLM's training data.
+ |
+
+
+`token_count`
+ |
+
+`int`
+
+Output only. Token count for this candidate.
+ |
+
+
+`grounding_attributions`
+ |
+
+`MutableSequence[google.ai.generativelanguage.GroundingAttribution]`
+
+Output only. Attribution information for sources that
+contributed to a grounded answer.
+
+This field is populated for ``GenerateAnswer`` calls.
+ |
+
+
+
+
+
+## Child Classes
+[`class FinishReason`](../../../google/generativeai/protos/Candidate/FinishReason.md)
+
diff --git a/docs/api/google/generativeai/protos/Candidate/FinishReason.md b/docs/api/google/generativeai/protos/Candidate/FinishReason.md
new file mode 100644
index 000000000..d6efd591b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Candidate/FinishReason.md
@@ -0,0 +1,733 @@
+description: Defines the reason why the model stopped generating tokens.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Candidate.FinishReason
+
+
+
+
+
+
+
+Defines the reason why the model stopped generating tokens.
+
+
+google.generativeai.protos.Candidate.FinishReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`FINISH_REASON_UNSPECIFIED`
+ |
+
+`0`
+
+Default value. This value is unused.
+ |
+
+
+`STOP`
+ |
+
+`1`
+
+Natural stop point of the model or provided
+stop sequence.
+ |
+
+
+`MAX_TOKENS`
+ |
+
+`2`
+
+The maximum number of tokens as specified in
+the request was reached.
+ |
+
+
+`SAFETY`
+ |
+
+`3`
+
+The candidate content was flagged for safety
+reasons.
+ |
+
+
+`RECITATION`
+ |
+
+`4`
+
+The candidate content was flagged for
+recitation reasons.
+ |
+
+
+`OTHER`
+ |
+
+`5`
+
+Unknown reason.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+__lt__
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+__mod__
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+__rmod__
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+FINISH_REASON_UNSPECIFIED
+ |
+
+``
+ |
+
+
+MAX_TOKENS
+ |
+
+``
+ |
+
+
+OTHER
+ |
+
+``
+ |
+
+
+RECITATION
+ |
+
+``
+ |
+
+
+SAFETY
+ |
+
+``
+ |
+
+
+STOP
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Chunk.md b/docs/api/google/generativeai/protos/Chunk.md
new file mode 100644
index 000000000..d807ab292
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Chunk.md
@@ -0,0 +1,108 @@
+description: A Chunk is a subpart of a Document that is treated as an independent unit for the purposes of vector representation and storage.
+
+
+
+
+
+
+
+# google.generativeai.protos.Chunk
+
+
+
+
+
+
+
+A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
+
+
+ A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s.
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Immutable. Identifier. The ``Chunk`` resource name. The ID
+(name excluding the `corpora/*/documents/*/chunks/` prefix)
+can contain up to 40 characters that are lowercase
+alphanumeric or dashes (-). The ID cannot start or end with
+a dash. If the name is empty on create, a random
+12-character unique ID will be generated. Example:
+``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c``
+ |
+
+
+`data`
+ |
+
+`google.ai.generativelanguage.ChunkData`
+
+Required. The content for the ``Chunk``, such as the text
+string. The maximum number of tokens per chunk is 2043.
+ |
+
+
+`custom_metadata`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
+
+Optional. User provided custom metadata stored as key-value
+pairs. The maximum number of ``CustomMetadata`` per chunk is
+20.
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Chunk`` was
+created.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Chunk`` was last
+updated.
+ |
+
+
+`state`
+ |
+
+`google.ai.generativelanguage.Chunk.State`
+
+Output only. Current state of the ``Chunk``.
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/protos/Chunk/State.md)
+
diff --git a/docs/api/google/generativeai/protos/Chunk/State.md b/docs/api/google/generativeai/protos/Chunk/State.md
new file mode 100644
index 000000000..137cb9efe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Chunk/State.md
@@ -0,0 +1,696 @@
+description: States for the lifecycle of a Chunk.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Chunk.State
+
+
+
+
+
+
+
+States for the lifecycle of a ``Chunk``.
+
+
+google.generativeai.protos.Chunk.State(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`STATE_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is used if the
+state is omitted.
+ |
+
+
+`STATE_PENDING_PROCESSING`
+ |
+
+`1`
+
+``Chunk`` is being processed (embedding and vector storage).
+ |
+
+
+`STATE_ACTIVE`
+ |
+
+`2`
+
+``Chunk`` is processed and available for querying.
+ |
+
+
+`STATE_FAILED`
+ |
+
+`10`
+
+``Chunk`` failed processing.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+__lt__
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+__mod__
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+__rmod__
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+STATE_ACTIVE
+ |
+
+``
+ |
+
+
+STATE_FAILED
+ |
+
+``
+ |
+
+
+STATE_PENDING_PROCESSING
+ |
+
+``
+ |
+
+
+STATE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/ChunkData.md b/docs/api/google/generativeai/protos/ChunkData.md
new file mode 100644
index 000000000..9527b78b5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ChunkData.md
@@ -0,0 +1,51 @@
+description: Extracted data that represents the Chunk content.
+
+
+
+
+
+
+# google.generativeai.protos.ChunkData
+
+
+
+
+
+
+
+Extracted data that represents the ``Chunk`` content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`string_value`
+ |
+
+`str`
+
+The ``Chunk`` content as a string. The maximum number of
+tokens per chunk is 2043.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CitationMetadata.md b/docs/api/google/generativeai/protos/CitationMetadata.md
new file mode 100644
index 000000000..58e6219df
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CitationMetadata.md
@@ -0,0 +1,48 @@
+description: A collection of source attributions for a piece of content.
+
+
+
+
+
+
+# google.generativeai.protos.CitationMetadata
+
+
+
+
+
+
+
+A collection of source attributions for a piece of content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`citation_sources`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CitationSource]`
+
+Citations to sources for a specific response.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CitationSource.md b/docs/api/google/generativeai/protos/CitationSource.md
new file mode 100644
index 000000000..79689da5d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CitationSource.md
@@ -0,0 +1,88 @@
+description: A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+# google.generativeai.protos.CitationSource
+
+
+
+
+
+
+
+A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`start_index`
+ |
+
+`int`
+
+Optional. Start of segment of the response
+that is attributed to this source.
+
+Index indicates the start of the segment,
+measured in bytes.
+
+ |
+
+
+`end_index`
+ |
+
+`int`
+
+Optional. End of the attributed segment,
+exclusive.
+
+ |
+
+
+`uri`
+ |
+
+`str`
+
+Optional. URI that is attributed as a source
+for a portion of the text.
+
+ |
+
+
+`license_`
+ |
+
+`str`
+
+Optional. License for the GitHub project that
+is attributed as a source for segment.
+
+License info is required for code citations.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CodeExecution.md b/docs/api/google/generativeai/protos/CodeExecution.md
new file mode 100644
index 000000000..f74a88327
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecution.md
@@ -0,0 +1,29 @@
+description: Tool that executes code generated by the model, and automatically returns the result to the model.
+
+
+
+
+
+
+# google.generativeai.protos.CodeExecution
+
+
+
+
+
+
+
+Tool that executes code generated by the model, and automatically returns the result to the model.
+
+
+
+See also ``ExecutableCode`` and ``CodeExecutionResult`` which are
+only generated when using this tool.
+
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult.md b/docs/api/google/generativeai/protos/CodeExecutionResult.md
new file mode 100644
index 000000000..39ce4573d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecutionResult.md
@@ -0,0 +1,65 @@
+description: Result of executing the ExecutableCode.
+
+
+
+
+
+
+
+# google.generativeai.protos.CodeExecutionResult
+
+
+
+
+
+
+
+Result of executing the ``ExecutableCode``.
+
+
+
+Only generated when using the ``CodeExecution``, and always follows
+a ``part`` containing the ``ExecutableCode``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`outcome`
+ |
+
+`google.ai.generativelanguage.CodeExecutionResult.Outcome`
+
+Required. Outcome of the code execution.
+ |
+
+
+`output`
+ |
+
+`str`
+
+Optional. Contains stdout when code execution
+is successful, stderr or other description
+otherwise.
+ |
+
+
+
+
+
+## Child Classes
+[`class Outcome`](../../../google/generativeai/protos/CodeExecutionResult/Outcome.md)
+
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
new file mode 100644
index 000000000..54b0195fd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
@@ -0,0 +1,699 @@
+description: Enumeration of possible outcomes of the code execution.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.CodeExecutionResult.Outcome
+
+
+
+
+
+
+
+Enumeration of possible outcomes of the code execution.
+
+
+google.generativeai.protos.CodeExecutionResult.Outcome(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`OUTCOME_UNSPECIFIED`
+ |
+
+`0`
+
+Unspecified status. This value should not be
+used.
+ |
+
+
+`OUTCOME_OK`
+ |
+
+`1`
+
+Code execution completed successfully.
+ |
+
+
+`OUTCOME_FAILED`
+ |
+
+`2`
+
+Code execution finished but with a failure. ``stderr``
+should contain the reason.
+ |
+
+
+`OUTCOME_DEADLINE_EXCEEDED`
+ |
+
+`3`
+
+Code execution ran for too long, and was
+cancelled. There may or may not be a partial
+output present.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value. __lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value. __mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self. __rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+OUTCOME_DEADLINE_EXCEEDED
+ |
+
+``
+ |
+
+
+OUTCOME_FAILED
+ |
+
+``
+ |
+
+
+OUTCOME_OK
+ |
+
+``
+ |
+
+
+OUTCOME_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Condition.md b/docs/api/google/generativeai/protos/Condition.md
new file mode 100644
index 000000000..194bdbc59
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Condition.md
@@ -0,0 +1,80 @@
+description: Filter condition applicable to a single key.
+
+
+
+
+
+
+
+# google.generativeai.protos.Condition
+
+
+
+
+
+
+
+Filter condition applicable to a single key.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`string_value`
+ |
+
+`str`
+
+The string value to filter the metadata on.
+
+This field is a member of `oneof`_ ``value``.
+ |
+
+
+`numeric_value`
+ |
+
+`float`
+
+The numeric value to filter the metadata on.
+
+This field is a member of `oneof`_ ``value``.
+ |
+
+
+`operation`
+ |
+
+`google.ai.generativelanguage.Condition.Operator`
+
+Required. Operator applied to the given
+key-value pair to trigger the condition.
+ |
+
+
+
+
+
+## Child Classes
+[`class Operator`](../../../google/generativeai/protos/Condition/Operator.md)
+
diff --git a/docs/api/google/generativeai/protos/Condition/Operator.md b/docs/api/google/generativeai/protos/Condition/Operator.md
new file mode 100644
index 000000000..1a7cdec31
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Condition/Operator.md
@@ -0,0 +1,782 @@
+description: Defines the valid operators that can be applied to a key-value pair.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Condition.Operator
+
+
+
+
+
+
+
+Defines the valid operators that can be applied to a key-value pair.
+
+
+google.generativeai.protos.Condition.Operator(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`OPERATOR_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is unused.
+ |
+
+
+`LESS`
+ |
+
+`1`
+
+Supported by numeric.
+ |
+
+
+`LESS_EQUAL`
+ |
+
+`2`
+
+Supported by numeric.
+ |
+
+
+`EQUAL`
+ |
+
+`3`
+
+Supported by numeric & string.
+ |
+
+
+`GREATER_EQUAL`
+ |
+
+`4`
+
+Supported by numeric.
+ |
+
+
+`GREATER`
+ |
+
+`5`
+
+Supported by numeric.
+ |
+
+
+`NOT_EQUAL`
+ |
+
+`6`
+
+Supported by numeric & string.
+ |
+
+
+`INCLUDES`
+ |
+
+`7`
+
+Supported by string only when ``CustomMetadata`` value type
+for the given key has a ``string_list_value``.
+ |
+
+
+`EXCLUDES`
+ |
+
+`8`
+
+Supported by string only when ``CustomMetadata`` value type
+for the given key has a ``string_list_value``.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value. __lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value. __mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self. __rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+EQUAL
+ |
+
+``
+ |
+
+
+EXCLUDES
+ |
+
+``
+ |
+
+
+GREATER
+ |
+
+``
+ |
+
+
+GREATER_EQUAL
+ |
+
+``
+ |
+
+
+INCLUDES
+ |
+
+``
+ |
+
+
+LESS
+ |
+
+``
+ |
+
+
+LESS_EQUAL
+ |
+
+``
+ |
+
+
+NOT_EQUAL
+ |
+
+``
+ |
+
+
+OPERATOR_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Content.md b/docs/api/google/generativeai/protos/Content.md
new file mode 100644
index 000000000..af204b5cf
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Content.md
@@ -0,0 +1,64 @@
+description: The base structured datatype containing multi-part content of a message.
+
+
+
+
+
+
+# google.generativeai.protos.Content
+
+
+
+
+
+
+
+The base structured datatype containing multi-part content of a message.
+
+
+
+A ``Content`` includes a ``role`` field designating the producer of
+the ``Content`` and a ``parts`` field containing multi-part data
+that contains the content of the message turn.
+
+
+
+
+
+
+Attributes |
+
+
+
+`parts`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Part]`
+
+Ordered ``Parts`` that constitute a single message. Parts
+may have different MIME types.
+ |
+
+
+`role`
+ |
+
+`str`
+
+Optional. The producer of the content. Must
+be either 'user' or 'model'.
+Useful to set for multi-turn conversations,
+otherwise can be left blank or unset.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ContentEmbedding.md b/docs/api/google/generativeai/protos/ContentEmbedding.md
new file mode 100644
index 000000000..ce60ea10a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ContentEmbedding.md
@@ -0,0 +1,48 @@
+description: A list of floats representing an embedding.
+
+
+
+
+
+
+# google.generativeai.protos.ContentEmbedding
+
+
+
+
+
+
+
+A list of floats representing an embedding.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`values`
+ |
+
+`MutableSequence[float]`
+
+The embedding values.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ContentFilter.md b/docs/api/google/generativeai/protos/ContentFilter.md
new file mode 100644
index 000000000..659d438c5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ContentFilter.md
@@ -0,0 +1,67 @@
+description: Content filtering metadata associated with processing a single request.
+
+
+
+
+
+
+
+# google.generativeai.protos.ContentFilter
+
+
+
+
+
+
+
+Content filtering metadata associated with processing a single request.
+
+
+ContentFilter contains a reason and an optional supporting
+string. The reason may be unspecified.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`reason`
+ |
+
+`google.ai.generativelanguage.ContentFilter.BlockedReason`
+
+The reason content was blocked during request
+processing.
+ |
+
+
+`message`
+ |
+
+`str`
+
+A string that describes the filtering
+behavior in more detail.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockedReason`](../../../google/generativeai/types/BlockedReason.md)
+
diff --git a/docs/api/google/generativeai/protos/Corpus.md b/docs/api/google/generativeai/protos/Corpus.md
new file mode 100644
index 000000000..66b6ded78
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Corpus.md
@@ -0,0 +1,87 @@
+description: A ``Corpus`` is a collection of ``Document``\ s.
+
+
+
+
+
+
+# google.generativeai.protos.Corpus
+
+
+
+
+
+
+
+A ``Corpus`` is a collection of ``Document``\ s.
+
+
+A project can create up to 5 corpora.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Immutable. Identifier. The ``Corpus`` resource name. The ID
+(name excluding the "corpora/" prefix) can contain up to 40
+characters that are lowercase alphanumeric or dashes (-).
+The ID cannot start or end with a dash. If the name is empty
+on create, a unique name will be derived from
+``display_name`` along with a 12 character random suffix.
+Example: ``corpora/my-awesome-corpora-123a456b789c``
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+Optional. The human-readable display name for the
+``Corpus``. The display name must be no more than 512
+characters in length, including spaces. Example: "Docs on
+Semantic Retriever".
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Corpus`` was
+created.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Corpus`` was last
+updated.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensRequest.md b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
new file mode 100644
index 000000000..10be99376
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
@@ -0,0 +1,66 @@
+description: Counts the number of tokens in the prompt sent to a model.
+
+
+
+
+
+
+# google.generativeai.protos.CountMessageTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+`prompt`
+ |
+
+`google.ai.generativelanguage.MessagePrompt`
+
+Required. The prompt, whose token count is to
+be returned.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensResponse.md b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
new file mode 100644
index 000000000..91a414735
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
@@ -0,0 +1,52 @@
+description: A response from CountMessageTokens.
+
+
+
+
+
+
+# google.generativeai.protos.CountMessageTokensResponse
+
+
+
+
+
+
+
+A response from ``CountMessageTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`token_count`
+ |
+
+`int`
+
+The number of tokens that the ``model`` tokenizes the
+``prompt`` into.
+
+Always non-negative.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTextTokensRequest.md b/docs/api/google/generativeai/protos/CountTextTokensRequest.md
new file mode 100644
index 000000000..dc28b4959
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTextTokensRequest.md
@@ -0,0 +1,66 @@
+description: Counts the number of tokens in the prompt sent to a model.
+
+
+
+
+
+
+# google.generativeai.protos.CountTextTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+`prompt`
+ |
+
+`google.ai.generativelanguage.TextPrompt`
+
+Required. The free-form input text given to
+the model as a prompt.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTextTokensResponse.md b/docs/api/google/generativeai/protos/CountTextTokensResponse.md
new file mode 100644
index 000000000..75b139c89
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTextTokensResponse.md
@@ -0,0 +1,52 @@
+description: A response from CountTextTokens.
+
+
+
+
+
+
+# google.generativeai.protos.CountTextTokensResponse
+
+
+
+
+
+
+
+A response from ``CountTextTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`token_count`
+ |
+
+`int`
+
+The number of tokens that the ``model`` tokenizes the
+``prompt`` into.
+
+Always non-negative.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTokensRequest.md b/docs/api/google/generativeai/protos/CountTokensRequest.md
new file mode 100644
index 000000000..3ec7fd287
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTokensRequest.md
@@ -0,0 +1,77 @@
+description: Counts the number of tokens in the prompt sent to a model.
+
+
+
+
+
+
+# google.generativeai.protos.CountTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+`contents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Optional. The input given to the model as a prompt. This
+field is ignored when ``generate_content_request`` is set.
+ |
+
+
+`generate_content_request`
+ |
+
+`google.ai.generativelanguage.GenerateContentRequest`
+
+Optional. The overall input given to the
+model. CountTokens will count prompt, function
+calling, etc.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTokensResponse.md b/docs/api/google/generativeai/protos/CountTokensResponse.md
new file mode 100644
index 000000000..ca4c9899a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTokensResponse.md
@@ -0,0 +1,64 @@
+description: A response from CountTokens.
+
+
+
+
+
+
+# google.generativeai.protos.CountTokensResponse
+
+
+
+
+
+
+
+A response from ``CountTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`total_tokens`
+ |
+
+`int`
+
+The number of tokens that the ``model`` tokenizes the
+``prompt`` into.
+
+Always non-negative. When cached_content is set, this is
+still the total effective prompt size. I.e. this includes
+the number of tokens in the cached content.
+ |
+
+
+`cached_content_token_count`
+ |
+
+`int`
+
+Number of tokens in the cached part of the
+prompt, i.e. in the cached content.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateCachedContentRequest.md b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
new file mode 100644
index 000000000..d34f0b8da
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
@@ -0,0 +1,48 @@
+description: Request to create CachedContent.
+
+
+
+
+
+
+# google.generativeai.protos.CreateCachedContentRequest
+
+
+
+
+
+
+
+Request to create CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`cached_content`
+ |
+
+`google.ai.generativelanguage.CachedContent`
+
+Required. The cached content to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateChunkRequest.md b/docs/api/google/generativeai/protos/CreateChunkRequest.md
new file mode 100644
index 000000000..706273d4a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateChunkRequest.md
@@ -0,0 +1,59 @@
+description: Request to create a Chunk.
+
+
+
+
+
+
+# google.generativeai.protos.CreateChunkRequest
+
+
+
+
+
+
+
+Request to create a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The name of the ``Document`` where this ``Chunk``
+will be created. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`chunk`
+ |
+
+`google.ai.generativelanguage.Chunk`
+
+Required. The ``Chunk`` to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateCorpusRequest.md b/docs/api/google/generativeai/protos/CreateCorpusRequest.md
new file mode 100644
index 000000000..68e3a89ce
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateCorpusRequest.md
@@ -0,0 +1,48 @@
+description: Request to create a Corpus.
+
+
+
+
+
+
+# google.generativeai.protos.CreateCorpusRequest
+
+
+
+
+
+
+
+Request to create a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`corpus`
+ |
+
+`google.ai.generativelanguage.Corpus`
+
+Required. The ``Corpus`` to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateDocumentRequest.md b/docs/api/google/generativeai/protos/CreateDocumentRequest.md
new file mode 100644
index 000000000..72a14b3aa
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateDocumentRequest.md
@@ -0,0 +1,58 @@
+description: Request to create a Document.
+
+
+
+
+
+
+# google.generativeai.protos.CreateDocumentRequest
+
+
+
+
+
+
+
+Request to create a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The name of the ``Corpus`` where this ``Document``
+will be created. Example: ``corpora/my-corpus-123``
+ |
+
+
+`document`
+ |
+
+`google.ai.generativelanguage.Document`
+
+Required. The ``Document`` to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateFileRequest.md b/docs/api/google/generativeai/protos/CreateFileRequest.md
new file mode 100644
index 000000000..53f83c72c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateFileRequest.md
@@ -0,0 +1,48 @@
+description: Request for CreateFile.
+
+
+
+
+
+
+# google.generativeai.protos.CreateFileRequest
+
+
+
+
+
+
+
+Request for ``CreateFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`file`
+ |
+
+`google.ai.generativelanguage.File`
+
+Optional. Metadata for the file to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateFileResponse.md b/docs/api/google/generativeai/protos/CreateFileResponse.md
new file mode 100644
index 000000000..fd8ad9f44
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateFileResponse.md
@@ -0,0 +1,48 @@
+description: Response for CreateFile.
+
+
+
+
+
+
+# google.generativeai.protos.CreateFileResponse
+
+
+
+
+
+
+
+Response for ``CreateFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`file`
+ |
+
+`google.ai.generativelanguage.File`
+
+Metadata for the created file.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreatePermissionRequest.md b/docs/api/google/generativeai/protos/CreatePermissionRequest.md
new file mode 100644
index 000000000..8e8c9b945
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreatePermissionRequest.md
@@ -0,0 +1,58 @@
+description: Request to create a Permission.
+
+
+
+
+
+
+# google.generativeai.protos.CreatePermissionRequest
+
+
+
+
+
+
+
+Request to create a ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The parent resource of the ``Permission``.
+Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}``
+ |
+
+
+`permission`
+ |
+
+`google.ai.generativelanguage.Permission`
+
+Required. The permission to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
new file mode 100644
index 000000000..79a3bd8e4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
@@ -0,0 +1,86 @@
+description: Metadata about the state and progress of creating a tuned model returned from the long-running operation
+
+
+
+
+
+
+# google.generativeai.protos.CreateTunedModelMetadata
+
+
+
+
+
+
+
+Metadata about the state and progress of creating a tuned model returned from the long-running operation
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_model`
+ |
+
+`str`
+
+Name of the tuned model associated with the
+tuning operation.
+ |
+
+
+`total_steps`
+ |
+
+`int`
+
+The total number of tuning steps.
+ |
+
+
+`completed_steps`
+ |
+
+`int`
+
+The number of steps completed.
+ |
+
+
+`completed_percent`
+ |
+
+`float`
+
+The completed percentage for the tuning
+operation.
+ |
+
+
+`snapshots`
+ |
+
+`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
+
+Metrics collected during tuning.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelRequest.md b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
new file mode 100644
index 000000000..572bc2c83
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
@@ -0,0 +1,62 @@
+description: Request to create a TunedModel.
+
+
+
+
+
+
+# google.generativeai.protos.CreateTunedModelRequest
+
+
+
+
+
+
+
+Request to create a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_model_id`
+ |
+
+`str`
+
+Optional. The unique id for the tuned model if specified.
+This value should be up to 40 characters, the first
+character must be a letter, the last could be a letter or a
+number. The id must match the regular expression:
+``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
+
+ |
+
+
+`tuned_model`
+ |
+
+`google.ai.generativelanguage.TunedModel`
+
+Required. The tuned model to create.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CustomMetadata.md b/docs/api/google/generativeai/protos/CustomMetadata.md
new file mode 100644
index 000000000..1a401a93c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CustomMetadata.md
@@ -0,0 +1,87 @@
+description: User provided metadata stored as key-value pairs.
+
+
+
+
+
+
+# google.generativeai.protos.CustomMetadata
+
+
+
+
+
+
+
+User provided metadata stored as key-value pairs.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`string_value`
+ |
+
+`str`
+
+The string value of the metadata to store.
+
+This field is a member of `oneof`_ ``value``.
+ |
+
+
+`string_list_value`
+ |
+
+`google.ai.generativelanguage.StringList`
+
+The StringList value of the metadata to
+store.
+
+This field is a member of `oneof`_ ``value``.
+ |
+
+
+`numeric_value`
+ |
+
+`float`
+
+The numeric value of the metadata to store.
+
+This field is a member of `oneof`_ ``value``.
+ |
+
+
+`key`
+ |
+
+`str`
+
+Required. The key of the metadata to store.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Dataset.md b/docs/api/google/generativeai/protos/Dataset.md
new file mode 100644
index 000000000..d00c056c6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Dataset.md
@@ -0,0 +1,50 @@
+description: Dataset for training or validation.
+
+
+
+
+
+
+# google.generativeai.protos.Dataset
+
+
+
+
+
+
+
+Dataset for training or validation.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`examples`
+ |
+
+`google.ai.generativelanguage.TuningExamples`
+
+Optional. Inline examples.
+
+This field is a member of `oneof`_ ``dataset``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
new file mode 100644
index 000000000..48df77591
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
@@ -0,0 +1,49 @@
+description: Request to delete CachedContent.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteCachedContentRequest
+
+
+
+
+
+
+
+Request to delete CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name referring to the content cache
+entry Format: ``cachedContents/{id}``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteChunkRequest.md b/docs/api/google/generativeai/protos/DeleteChunkRequest.md
new file mode 100644
index 000000000..99f24bd06
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteChunkRequest.md
@@ -0,0 +1,50 @@
+description: Request to delete a Chunk.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteChunkRequest
+
+
+
+
+
+
+
+Request to delete a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the ``Chunk`` to delete.
+Example:
+``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteCorpusRequest.md b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
new file mode 100644
index 000000000..0c4e42727
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
@@ -0,0 +1,62 @@
+description: Request to delete a Corpus.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteCorpusRequest
+
+
+
+
+
+
+
+Request to delete a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the ``Corpus``. Example:
+``corpora/my-corpus-123``
+ |
+
+
+`force`
+ |
+
+`bool`
+
+Optional. If set to true, any ``Document``\ s and objects
+related to this ``Corpus`` will also be deleted.
+
+If false (the default), a ``FAILED_PRECONDITION`` error will
+be returned if ``Corpus`` contains any ``Document``\ s.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteDocumentRequest.md b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
new file mode 100644
index 000000000..e4c78f097
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
@@ -0,0 +1,62 @@
+description: Request to delete a Document.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteDocumentRequest
+
+
+
+
+
+
+
+Request to delete a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the ``Document`` to delete.
+Example: ``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`force`
+ |
+
+`bool`
+
+Optional. If set to true, any ``Chunk``\ s and objects
+related to this ``Document`` will also be deleted.
+
+If false (the default), a ``FAILED_PRECONDITION`` error will
+be returned if ``Document`` contains any ``Chunk``\ s.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteFileRequest.md b/docs/api/google/generativeai/protos/DeleteFileRequest.md
new file mode 100644
index 000000000..68cefab0a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteFileRequest.md
@@ -0,0 +1,49 @@
+description: Request for DeleteFile.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteFileRequest
+
+
+
+
+
+
+
+Request for ``DeleteFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``File`` to delete. Example:
+``files/abc-123``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeletePermissionRequest.md b/docs/api/google/generativeai/protos/DeletePermissionRequest.md
new file mode 100644
index 000000000..616a8b075
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeletePermissionRequest.md
@@ -0,0 +1,50 @@
+description: Request to delete the Permission.
+
+
+
+
+
+
+# google.generativeai.protos.DeletePermissionRequest
+
+
+
+
+
+
+
+Request to delete the ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the permission. Formats:
+``tunedModels/{tuned_model}/permissions/{permission}``
+``corpora/{corpus}/permissions/{permission}``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
new file mode 100644
index 000000000..aa4cd8b93
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
@@ -0,0 +1,49 @@
+description: Request to delete a TunedModel.
+
+
+
+
+
+
+# google.generativeai.protos.DeleteTunedModelRequest
+
+
+
+
+
+
+
+Request to delete a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the model. Format:
+``tunedModels/my-model-id``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Document.md b/docs/api/google/generativeai/protos/Document.md
new file mode 100644
index 000000000..fe32d1e41
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Document.md
@@ -0,0 +1,99 @@
+description: A Document is a collection of Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.Document
+
+
+
+
+
+
+
+A ``Document`` is a collection of ``Chunk``\ s.
+
+
+ A ``Corpus`` can
+have a maximum of 10,000 ``Document``\ s.
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Immutable. Identifier. The ``Document`` resource name. The
+ID (name excluding the `corpora/*/documents/` prefix) can
+contain up to 40 characters that are lowercase alphanumeric
+or dashes (-). The ID cannot start or end with a dash. If
+the name is empty on create, a unique name will be derived
+from ``display_name`` along with a 12 character random
+suffix. Example:
+``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c``
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+Optional. The human-readable display name for the
+``Document``. The display name must be no more than 512
+characters in length, including spaces. Example: "Semantic
+Retriever Documentation".
+ |
+
+
+`custom_metadata`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
+
+Optional. User provided custom metadata stored as key-value
+pairs used for querying. A ``Document`` can have a maximum
+of 20 ``CustomMetadata``.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Document`` was last
+updated.
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Document`` was
+created.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedContentRequest.md b/docs/api/google/generativeai/protos/EmbedContentRequest.md
new file mode 100644
index 000000000..be4d52cc0
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedContentRequest.md
@@ -0,0 +1,103 @@
+description: Request containing the Content for the model to embed.
+
+
+
+
+
+
+# google.generativeai.protos.EmbedContentRequest
+
+
+
+
+
+
+
+Request containing the ``Content`` for the model to embed.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+`content`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Required. The content to embed. Only the ``parts.text``
+fields will be counted.
+ |
+
+
+`task_type`
+ |
+
+`google.ai.generativelanguage.TaskType`
+
+Optional. Optional task type for which the embeddings will
+be used. Can only be set for ``models/embedding-001``.
+
+ |
+
+
+`title`
+ |
+
+`str`
+
+Optional. An optional title for the text. Only applicable
+when TaskType is ``RETRIEVAL_DOCUMENT``.
+
+Note: Specifying a ``title`` for ``RETRIEVAL_DOCUMENT``
+provides better quality embeddings for retrieval.
+
+ |
+
+
+`output_dimensionality`
+ |
+
+`int`
+
+Optional. Optional reduced dimension for the output
+embedding. If set, excessive values in the output embedding
+are truncated from the end. Supported by newer models since
+2024, and the earlier model (``models/embedding-001``)
+cannot specify this value.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedContentResponse.md b/docs/api/google/generativeai/protos/EmbedContentResponse.md
new file mode 100644
index 000000000..80fce6b73
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedContentResponse.md
@@ -0,0 +1,49 @@
+description: The response to an EmbedContentRequest.
+
+
+
+
+
+
+# google.generativeai.protos.EmbedContentResponse
+
+
+
+
+
+
+
+The response to an ``EmbedContentRequest``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`embedding`
+ |
+
+`google.ai.generativelanguage.ContentEmbedding`
+
+Output only. The embedding generated from the
+input content.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedTextRequest.md b/docs/api/google/generativeai/protos/EmbedTextRequest.md
new file mode 100644
index 000000000..e4078db68
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedTextRequest.md
@@ -0,0 +1,59 @@
+description: Request to get a text embedding from the model.
+
+
+
+
+
+
+# google.generativeai.protos.EmbedTextRequest
+
+
+
+
+
+
+
+Request to get a text embedding from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The model name to use with the
+format model=models/{model}.
+ |
+
+
+`text`
+ |
+
+`str`
+
+Optional. The free-form input text that the
+model will turn into an embedding.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedTextResponse.md b/docs/api/google/generativeai/protos/EmbedTextResponse.md
new file mode 100644
index 000000000..adaf5be7d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedTextResponse.md
@@ -0,0 +1,50 @@
+description: The response to a EmbedTextRequest.
+
+
+
+
+
+
+# google.generativeai.protos.EmbedTextResponse
+
+
+
+
+
+
+
+The response to a EmbedTextRequest.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`embedding`
+ |
+
+`google.ai.generativelanguage.Embedding`
+
+Output only. The embedding generated from the
+input text.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Embedding.md b/docs/api/google/generativeai/protos/Embedding.md
new file mode 100644
index 000000000..7a8f70006
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Embedding.md
@@ -0,0 +1,48 @@
+description: A list of floats representing the embedding.
+
+
+
+
+
+
+# google.generativeai.protos.Embedding
+
+
+
+
+
+
+
+A list of floats representing the embedding.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`value`
+ |
+
+`MutableSequence[float]`
+
+The embedding values.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Example.md b/docs/api/google/generativeai/protos/Example.md
new file mode 100644
index 000000000..59aecebe1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Example.md
@@ -0,0 +1,60 @@
+description: An input/output example used to instruct the Model.
+
+
+
+
+
+
+# google.generativeai.protos.Example
+
+
+
+
+
+
+
+An input/output example used to instruct the Model.
+
+
+
+It demonstrates how the model should respond or format its
+response.
+
+
+
+
+
+
+Attributes |
+
+
+
+`input`
+ |
+
+`google.ai.generativelanguage.Message`
+
+Required. An example of an input ``Message`` from the user.
+ |
+
+
+`output`
+ |
+
+`google.ai.generativelanguage.Message`
+
+Required. An example of what the model should
+output given the input.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ExecutableCode.md b/docs/api/google/generativeai/protos/ExecutableCode.md
new file mode 100644
index 000000000..cb75a47b8
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ExecutableCode.md
@@ -0,0 +1,64 @@
+description: Code generated by the model that is meant to be executed, and the result returned to the model.
+
+
+
+
+
+
+
+# google.generativeai.protos.ExecutableCode
+
+
+
+
+
+
+
+Code generated by the model that is meant to be executed, and the result returned to the model.
+
+
+
+Only generated when using the ``CodeExecution`` tool, in which the
+code will be automatically executed, and a corresponding
+``CodeExecutionResult`` will also be generated.
+
+
+
+
+
+
+Attributes |
+
+
+
+`language`
+ |
+
+`google.ai.generativelanguage.ExecutableCode.Language`
+
+Required. Programming language of the ``code``.
+ |
+
+
+`code`
+ |
+
+`str`
+
+Required. The code to be executed.
+ |
+
+
+
+
+
+## Child Classes
+[`class Language`](../../../google/generativeai/protos/ExecutableCode/Language.md)
+
diff --git a/docs/api/google/generativeai/protos/ExecutableCode/Language.md b/docs/api/google/generativeai/protos/ExecutableCode/Language.md
new file mode 100644
index 000000000..23184a73a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ExecutableCode/Language.md
@@ -0,0 +1,663 @@
+description: Supported programming languages for the generated code.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.ExecutableCode.Language
+
+
+
+
+
+
+
+Supported programming languages for the generated code.
+
+
+google.generativeai.protos.ExecutableCode.Language(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`LANGUAGE_UNSPECIFIED`
+ |
+
+`0`
+
+Unspecified language. This value should not
+be used.
+ |
+
+
+`PYTHON`
+ |
+
+`1`
+
+Python >= 3.10, with numpy and simpy
+available.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+__lt__
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+__mod__
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+__rmod__
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+LANGUAGE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+PYTHON
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/File.md b/docs/api/google/generativeai/protos/File.md
new file mode 100644
index 000000000..71a2df334
--- /dev/null
+++ b/docs/api/google/generativeai/protos/File.md
@@ -0,0 +1,164 @@
+description: A file uploaded to the API.
+
+
+
+
+
+
+
+# google.generativeai.protos.File
+
+
+
+
+
+
+
+A file uploaded to the API.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`video_metadata`
+ |
+
+`google.ai.generativelanguage.VideoMetadata`
+
+Output only. Metadata for a video.
+
+This field is a member of `oneof`_ ``metadata``.
+ |
+
+
+`name`
+ |
+
+`str`
+
+Immutable. Identifier. The ``File`` resource name. The ID
+(name excluding the "files/" prefix) can contain up to 40
+characters that are lowercase alphanumeric or dashes (-).
+The ID cannot start or end with a dash. If the name is empty
+on create, a unique name will be generated. Example:
+``files/123-456``
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+Optional. The human-readable display name for the ``File``.
+The display name must be no more than 512 characters in
+length, including spaces. Example: "Welcome Image".
+ |
+
+
+`mime_type`
+ |
+
+`str`
+
+Output only. MIME type of the file.
+ |
+
+
+`size_bytes`
+ |
+
+`int`
+
+Output only. Size of the file in bytes.
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` was created.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` was last
+updated.
+ |
+
+
+`expiration_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` will be
+deleted. Only set if the ``File`` is scheduled to expire.
+ |
+
+
+`sha256_hash`
+ |
+
+`bytes`
+
+Output only. SHA-256 hash of the uploaded
+bytes.
+ |
+
+
+`uri`
+ |
+
+`str`
+
+Output only. The uri of the ``File``.
+ |
+
+
+`state`
+ |
+
+`google.ai.generativelanguage.File.State`
+
+Output only. Processing state of the File.
+ |
+
+
+`error`
+ |
+
+`google.rpc.status_pb2.Status`
+
+Output only. Error status if File processing
+failed.
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/protos/File/State.md)
+
diff --git a/docs/api/google/generativeai/protos/File/State.md b/docs/api/google/generativeai/protos/File/State.md
new file mode 100644
index 000000000..d515f5512
--- /dev/null
+++ b/docs/api/google/generativeai/protos/File/State.md
@@ -0,0 +1,698 @@
+description: States for the lifecycle of a File.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.File.State
+
+
+
+
+
+
+
+States for the lifecycle of a File.
+
+
+google.generativeai.protos.File.State(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`STATE_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is used if the
+state is omitted.
+ |
+
+
+`PROCESSING`
+ |
+
+`1`
+
+File is being processed and cannot be used
+for inference yet.
+ |
+
+
+`ACTIVE`
+ |
+
+`2`
+
+File is processed and available for
+inference.
+ |
+
+
+`FAILED`
+ |
+
+`10`
+
+File failed processing.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+__lt__
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+__mod__
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+__rmod__
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+ACTIVE
+ |
+
+``
+ |
+
+
+FAILED
+ |
+
+``
+ |
+
+
+PROCESSING
+ |
+
+``
+ |
+
+
+STATE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/FileData.md b/docs/api/google/generativeai/protos/FileData.md
new file mode 100644
index 000000000..d5c4678b5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FileData.md
@@ -0,0 +1,58 @@
+description: URI based data.
+
+
+
+
+
+
+# google.generativeai.protos.FileData
+
+
+
+
+
+
+
+URI based data.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`mime_type`
+ |
+
+`str`
+
+Optional. The IANA standard MIME type of the
+source data.
+ |
+
+
+`file_uri`
+ |
+
+`str`
+
+Required. URI.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionCall.md b/docs/api/google/generativeai/protos/FunctionCall.md
new file mode 100644
index 000000000..68132319b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCall.md
@@ -0,0 +1,62 @@
+description: A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name
with the arguments and their values.
+
+
+
+
+
+
+# google.generativeai.protos.FunctionCall
+
+
+
+
+
+
+
+A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name
with the arguments and their values.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the function to call.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+ |
+
+
+`args`
+ |
+
+`google.protobuf.struct_pb2.Struct`
+
+Optional. The function parameters and values
+in JSON object format.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig.md b/docs/api/google/generativeai/protos/FunctionCallingConfig.md
new file mode 100644
index 000000000..f4262e213
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCallingConfig.md
@@ -0,0 +1,69 @@
+description: Configuration for specifying function calling behavior.
+
+
+
+
+
+
+
+# google.generativeai.protos.FunctionCallingConfig
+
+
+
+
+
+
+
+Configuration for specifying function calling behavior.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`mode`
+ |
+
+`google.ai.generativelanguage.FunctionCallingConfig.Mode`
+
+Optional. Specifies the mode in which
+function calling should execute. If unspecified,
+the default value will be set to AUTO.
+ |
+
+
+`allowed_function_names`
+ |
+
+`MutableSequence[str]`
+
+Optional. A set of function names that, when provided,
+limits the functions the model will call.
+
+This should only be set when the Mode is ANY. Function names
+should match [FunctionDeclaration.name]. With mode set to
+ANY, model will predict a function call from the set of
+function names provided.
+ |
+
+
+
+
+
+## Child Classes
+[`class Mode`](../../../google/generativeai/protos/FunctionCallingConfig/Mode.md)
+
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
new file mode 100644
index 000000000..1a3483e8e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
@@ -0,0 +1,704 @@
+description: Defines the execution behavior for function calling by defining the execution mode.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.FunctionCallingConfig.Mode
+
+
+
+
+
+
+
+Defines the execution behavior for function calling by defining the execution mode.
+
+
+google.generativeai.protos.FunctionCallingConfig.Mode(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`MODE_UNSPECIFIED`
+ |
+
+`0`
+
+Unspecified function calling mode. This value
+should not be used.
+ |
+
+
+`AUTO`
+ |
+
+`1`
+
+Default model behavior, model decides to
+predict either a function call or a natural
+language response.
+ |
+
+
+`ANY`
+ |
+
+`2`
+
+Model is constrained to always predicting a function call
+only. If "allowed_function_names" are set, the predicted
+function call will be limited to any one of
+"allowed_function_names", else the predicted function call
+will be any one of the provided "function_declarations".
+ |
+
+
+`NONE`
+ |
+
+`3`
+
+Model will not predict any function call.
+Model behavior is same as when not passing any
+function declarations.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+ANY
+ |
+
+``
+ |
+
+
+AUTO
+ |
+
+``
+ |
+
+
+MODE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+NONE
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionDeclaration.md b/docs/api/google/generativeai/protos/FunctionDeclaration.md
new file mode 100644
index 000000000..62bc81229
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionDeclaration.md
@@ -0,0 +1,80 @@
+description: Structured representation of a function declaration as defined by the OpenAPI 3.03 specification __.
+
+
+
+
+
+
+# google.generativeai.protos.FunctionDeclaration
+
+
+
+
+
+
+
+Structured representation of a function declaration as defined by the `OpenAPI 3.03 specification `__.
+
+
+ Included in
+this declaration are the function name and parameters. This
+FunctionDeclaration is a representation of a block of code that can
+be used as a ``Tool`` by the model and executed by the client.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the function.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+ |
+
+
+`description`
+ |
+
+`str`
+
+Required. A brief description of the
+function.
+ |
+
+
+`parameters`
+ |
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Describes the parameters to this
+function. Reflects the Open API 3.03 Parameter
+Object string Key: the name of the parameter.
+Parameter names are case sensitive. Schema
+Value: the Schema defining the type used for the
+parameter.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionResponse.md b/docs/api/google/generativeai/protos/FunctionResponse.md
new file mode 100644
index 000000000..024421e79
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionResponse.md
@@ -0,0 +1,61 @@
+description: The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name
and a structured JSON object containing any output from the function is used as context to the model.
+
+
+
+
+
+
+# google.generativeai.protos.FunctionResponse
+
+
+
+
+
+
+
+The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name
and a structured JSON object containing any output from the function is used as context to the model.
+
+
+ This should contain the result of a\ ``FunctionCall``
+made based on model prediction.
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the function to call.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+ |
+
+
+`response`
+ |
+
+`google.protobuf.struct_pb2.Struct`
+
+Required. The function response in JSON
+object format.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
new file mode 100644
index 000000000..b06a2302b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
@@ -0,0 +1,151 @@
+description: Request to generate a grounded answer from the model.
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateAnswerRequest
+
+
+
+
+
+
+
+Request to generate a grounded answer from the model.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`inline_passages`
+ |
+
+`google.ai.generativelanguage.GroundingPassages`
+
+Passages provided inline with the request.
+
+This field is a member of `oneof`_ ``grounding_source``.
+ |
+
+
+`semantic_retriever`
+ |
+
+`google.ai.generativelanguage.SemanticRetrieverConfig`
+
+Content retrieved from resources created via
+the Semantic Retriever API.
+
+This field is a member of `oneof`_ ``grounding_source``.
+ |
+
+
+`model`
+ |
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the grounded response.
+
+Format: ``model=models/{model}``.
+ |
+
+
+`contents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Required. The content of the current conversation with the
+model. For single-turn queries, this is a single question to
+answer. For multi-turn queries, this is a repeated field
+that contains conversation history and the last ``Content``
+in the list containing the question.
+
+Note: GenerateAnswer currently only supports queries in
+English.
+ |
+
+
+`answer_style`
+ |
+
+`google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
+
+Required. Style in which answers should be
+returned.
+ |
+
+
+`safety_settings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+This will be enforced on the
+GenerateAnswerRequest.contents and
+``GenerateAnswerResponse.candidate``. There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any contents and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_HATE_SPEECH,
+HARM_CATEGORY_SEXUALLY_EXPLICIT,
+HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT
+are supported.
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model. A low
+temperature (~0.2) is usually recommended for
+Attributed-Question-Answering use cases.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class AnswerStyle`](../../../google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
new file mode 100644
index 000000000..242d337dd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
@@ -0,0 +1,698 @@
+description: Style for grounded answers.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
+
+
+
+
+
+
+
+Style for grounded answers.
+
+
+google.generativeai.protos.GenerateAnswerRequest.AnswerStyle(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`ANSWER_STYLE_UNSPECIFIED`
+ |
+
+`0`
+
+Unspecified answer style.
+ |
+
+
+`ABSTRACTIVE`
+ |
+
+`1`
+
+Succinct but abstract style.
+ |
+
+
+`EXTRACTIVE`
+ |
+
+`2`
+
+Very brief and extractive style.
+ |
+
+
+`VERBOSE`
+ |
+
+`3`
+
+Verbose style including extra details. The
+response may be formatted as a sentence,
+paragraph, multiple paragraphs, or bullet
+points, etc.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+ABSTRACTIVE
+ |
+
+``
+ |
+
+
+ANSWER_STYLE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+EXTRACTIVE
+ |
+
+``
+ |
+
+
+VERBOSE
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
new file mode 100644
index 000000000..e4f1ace8c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
@@ -0,0 +1,104 @@
+description: Response from the model for a grounded answer.
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateAnswerResponse
+
+
+
+
+
+
+
+Response from the model for a grounded answer.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`answer`
+ |
+
+`google.ai.generativelanguage.Candidate`
+
+Candidate answer from the model.
+
+Note: The model *always* attempts to provide a grounded
+answer, even when the answer is unlikely to be answerable
+from the given passages. In that case, a low-quality or
+ungrounded answer may be provided, along with a low
+``answerable_probability``.
+ |
+
+
+`answerable_probability`
+ |
+
+`float`
+
+Output only. The model's estimate of the probability that
+its answer is correct and grounded in the input passages.
+
+A low answerable_probability indicates that the answer might
+not be grounded in the sources.
+
+When ``answerable_probability`` is low, some clients may
+wish to:
+
+- Display a message to the effect of "We couldn’t answer
+ that question" to the user.
+- Fall back to a general-purpose LLM that answers the
+ question from world knowledge. The threshold and nature
+ of such fallbacks will depend on individual clients’ use
+ cases. 0.5 is a good starting threshold.
+
+ |
+
+
+`input_feedback`
+ |
+
+`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback`
+
+Output only. Feedback related to the input data used to
+answer the question, as opposed to model-generated response
+to the question.
+
+"Input data" can be one or more of the following:
+
+- Question specified by the last entry in
+ ``GenerateAnswerRequest.content``
+- Conversation history specified by the other entries in
+ ``GenerateAnswerRequest.content``
+- Grounding sources
+ (GenerateAnswerRequest.semantic_retriever or
+ GenerateAnswerRequest.inline_passages )
+
+ |
+
+
+
+
+
+## Child Classes
+[`class InputFeedback`](../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
new file mode 100644
index 000000000..a987bd401
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
@@ -0,0 +1,65 @@
+description: Feedback related to the input data used to answer the question, as opposed to model-generated response to the question.
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateAnswerResponse.InputFeedback
+
+
+
+
+
+
+
+Feedback related to the input data used to answer the question, as opposed to model-generated response to the question.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`block_reason`
+ |
+
+`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback.BlockReason`
+
+Optional. If set, the input was blocked and
+no candidates are returned. Rephrase your input.
+
+ |
+
+
+`safety_ratings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for safety of the input.
+There is at most one rating per category.
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockReason`](../../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
new file mode 100644
index 000000000..f0141fdf4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
@@ -0,0 +1,680 @@
+description: Specifies what was the reason why input was blocked.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
+
+
+
+
+
+
+
+Specifies what was the reason why input was blocked.
+
+
+google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`BLOCK_REASON_UNSPECIFIED`
+ |
+
+`0`
+
+Default value. This value is unused.
+ |
+
+
+`SAFETY`
+ |
+
+`1`
+
+Input was blocked due to safety reasons. You can inspect
+``safety_ratings`` to understand which safety category
+blocked it.
+ |
+
+
+`OTHER`
+ |
+
+`2`
+
+Input was blocked due to other reasons.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+BLOCK_REASON_UNSPECIFIED
+ |
+
+``
+ |
+
+
+OTHER
+ |
+
+``
+ |
+
+
+SAFETY
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentRequest.md b/docs/api/google/generativeai/protos/GenerateContentRequest.md
new file mode 100644
index 000000000..75ec43867
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentRequest.md
@@ -0,0 +1,151 @@
+description: Request to generate a completion from the model.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateContentRequest
+
+
+
+
+
+
+
+Request to generate a completion from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the completion.
+
+Format: ``name=models/{model}``.
+ |
+
+
+`system_instruction`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Optional. Developer set system instruction.
+Currently, text only.
+
+ |
+
+
+`contents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Required. The content of the current
+conversation with the model.
+For single-turn queries, this is a single
+instance. For multi-turn queries, this is a
+repeated field that contains conversation
+history + latest request.
+ |
+
+
+`tools`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Tool]`
+
+Optional. A list of ``Tools`` the model may use to generate
+the next response.
+
+A ``Tool`` is a piece of code that enables the system to
+interact with external systems to perform an action, or set
+of actions, outside of knowledge and scope of the model. The
+only supported tool is currently ``Function``.
+ |
+
+
+`tool_config`
+ |
+
+`google.ai.generativelanguage.ToolConfig`
+
+Optional. Tool configuration for any ``Tool`` specified in
+the request.
+ |
+
+
+`safety_settings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+This will be enforced on the
+GenerateContentRequest.contents and
+GenerateContentResponse.candidates . There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any contents and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_HATE_SPEECH,
+HARM_CATEGORY_SEXUALLY_EXPLICIT,
+HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT
+are supported.
+ |
+
+
+`generation_config`
+ |
+
+`google.ai.generativelanguage.GenerationConfig`
+
+Optional. Configuration options for model
+generation and outputs.
+
+ |
+
+
+`cached_content`
+ |
+
+`str`
+
+Optional. The name of the cached content used as context to
+serve the prediction. Note: only used in explicit caching,
+where users can have control over caching (e.g. what content
+to cache) and enjoy guaranteed cost savings. Format:
+``cachedContents/{cachedContent}``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse.md b/docs/api/google/generativeai/protos/GenerateContentResponse.md
new file mode 100644
index 000000000..b7b2172a6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse.md
@@ -0,0 +1,86 @@
+description: Response from the model supporting multiple candidates.
+
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateContentResponse
+
+
+
+
+
+
+
+Response from the model supporting multiple candidates.
+
+
+
+Note on safety ratings and content filtering. They are reported for
+both prompt in GenerateContentResponse.prompt_feedback
and for
+each candidate in ``finish_reason`` and in ``safety_ratings``. The
+API contract is that:
+
+- either all requested candidates are returned or no candidates at
+ all
+- no candidates are returned only if there was something wrong with
+ the prompt (see ``prompt_feedback``)
+- feedback on each candidate is reported on ``finish_reason`` and
+ ``safety_ratings``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Candidate]`
+
+Candidate responses from the model.
+ |
+
+
+`prompt_feedback`
+ |
+
+`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback`
+
+Returns the prompt's feedback related to the
+content filters.
+ |
+
+
+`usage_metadata`
+ |
+
+`google.ai.generativelanguage.GenerateContentResponse.UsageMetadata`
+
+Output only. Metadata on the generation
+requests' token usage.
+ |
+
+
+
+
+
+## Child Classes
+[`class PromptFeedback`](../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback.md)
+
+[`class UsageMetadata`](../../../google/generativeai/protos/GenerateContentResponse/UsageMetadata.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
new file mode 100644
index 000000000..f953cebc6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
@@ -0,0 +1,64 @@
+description: A set of the feedback metadata the prompt specified in GenerateContentRequest.content.
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateContentResponse.PromptFeedback
+
+
+
+
+
+
+
+A set of the feedback metadata the prompt specified in ``GenerateContentRequest.content``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`block_reason`
+ |
+
+`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback.BlockReason`
+
+Optional. If set, the prompt was blocked and
+no candidates are returned. Rephrase your
+prompt.
+ |
+
+
+`safety_ratings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for safety of the prompt.
+There is at most one rating per category.
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockReason`](../../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
new file mode 100644
index 000000000..bd62879f0
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
@@ -0,0 +1,680 @@
+description: Specifies what was the reason why prompt was blocked.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
+
+
+
+
+
+
+
+Specifies what was the reason why prompt was blocked.
+
+
+google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`BLOCK_REASON_UNSPECIFIED`
+ |
+
+`0`
+
+Default value. This value is unused.
+ |
+
+
+`SAFETY`
+ |
+
+`1`
+
+Prompt was blocked due to safety reasons. You can inspect
+``safety_ratings`` to understand which safety category
+blocked it.
+ |
+
+
+`OTHER`
+ |
+
+`2`
+
+Prompt was blocked due to unknown reasons.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+BLOCK_REASON_UNSPECIFIED
+ |
+
+``
+ |
+
+
+OTHER
+ |
+
+``
+ |
+
+
+SAFETY
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
new file mode 100644
index 000000000..8a5c0e431
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
@@ -0,0 +1,80 @@
+description: Metadata on the generation request's token usage.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateContentResponse.UsageMetadata
+
+
+
+
+
+
+
+Metadata on the generation request's token usage.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`prompt_token_count`
+ |
+
+`int`
+
+Number of tokens in the prompt. When cached_content is set,
+this is still the total effective prompt size. I.e. this
+includes the number of tokens in the cached content.
+ |
+
+
+`cached_content_token_count`
+ |
+
+`int`
+
+Number of tokens in the cached part of the
+prompt, i.e. in the cached content.
+ |
+
+
+`candidates_token_count`
+ |
+
+`int`
+
+Total number of tokens across the generated
+candidates.
+ |
+
+
+`total_token_count`
+ |
+
+`int`
+
+Total token count for the generation request
+(prompt + candidates).
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateMessageRequest.md b/docs/api/google/generativeai/protos/GenerateMessageRequest.md
new file mode 100644
index 000000000..9884bf972
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateMessageRequest.md
@@ -0,0 +1,124 @@
+description: Request to generate a message response from the model.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateMessageRequest
+
+
+
+
+
+
+
+Request to generate a message response from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The name of the model to use.
+
+Format: ``name=models/{model}``.
+ |
+
+
+`prompt`
+ |
+
+`google.ai.generativelanguage.MessagePrompt`
+
+Required. The structured textual input given
+to the model as a prompt.
+Given a
+prompt, the model will return what it predicts
+is the next message in the discussion.
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range over ``[0.0,1.0]``, inclusive. A value
+closer to ``1.0`` will produce responses that are more
+varied, while a value closer to ``0.0`` will typically
+result in less surprising responses from the model.
+
+ |
+
+
+`candidate_count`
+ |
+
+`int`
+
+Optional. The number of generated response messages to
+return.
+
+This value must be between ``[1, 8]``, inclusive. If unset,
+this will default to ``1``.
+
+ |
+
+
+`top_p`
+ |
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``.
+
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateMessageResponse.md b/docs/api/google/generativeai/protos/GenerateMessageResponse.md
new file mode 100644
index 000000000..91a07ac42
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateMessageResponse.md
@@ -0,0 +1,75 @@
+description: The response from the model.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateMessageResponse
+
+
+
+
+
+
+
+The response from the model.
+
+
+
+This includes candidate messages and
+conversation history in the form of chronologically-ordered
+messages.
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+Candidate response messages from the model.
+ |
+
+
+`messages`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+The conversation history used by the model.
+ |
+
+
+`filters`
+ |
+
+`MutableSequence[google.ai.generativelanguage.ContentFilter]`
+
+A set of content filtering metadata for the prompt and
+response text.
+
+This indicates which ``SafetyCategory``\ (s) blocked a
+candidate from this response, the lowest ``HarmProbability``
+that triggered a block, and the HarmThreshold setting for
+that category.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateTextRequest.md b/docs/api/google/generativeai/protos/GenerateTextRequest.md
new file mode 100644
index 000000000..05d08c518
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateTextRequest.md
@@ -0,0 +1,189 @@
+description: Request to generate a text completion response from the model.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateTextRequest
+
+
+
+
+
+
+
+Request to generate a text completion response from the model.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`model`
+ |
+
+`str`
+
+Required. The name of the ``Model`` or ``TunedModel`` to use
+for generating the completion. Examples:
+models/text-bison-001 tunedModels/sentence-translator-u3b7m
+ |
+
+
+`prompt`
+ |
+
+`google.ai.generativelanguage.TextPrompt`
+
+Required. The free-form input text given to
+the model as a prompt.
+Given a prompt, the model will generate a
+TextCompletion response it predicts as the
+completion of the input text.
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Optional. Controls the randomness of the output. Note: The
+default value varies by model, see the Model.temperature
+attribute of the ``Model`` returned from the ``getModel``
+function.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model.
+
+ |
+
+
+`candidate_count`
+ |
+
+`int`
+
+Optional. Number of generated responses to return.
+
+This value must be between [1, 8], inclusive. If unset, this
+will default to 1.
+
+ |
+
+
+`max_output_tokens`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to include in a
+candidate.
+
+If unset, this will default to output_token_limit specified
+in the ``Model`` specification.
+
+ |
+
+
+`top_p`
+ |
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model, see the
+Model.top_p attribute of the ``Model`` returned from the
+``getModel`` function.
+
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. Defaults to 40.
+
+Note: The default value varies by model, see the
+Model.top_k attribute of the ``Model`` returned from the
+``getModel`` function.
+
+ |
+
+
+`safety_settings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+that will be enforced on the GenerateTextRequest.prompt
+and GenerateTextResponse.candidates . There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any prompts and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
+HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
+HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported
+in text service.
+ |
+
+
+`stop_sequences`
+ |
+
+`MutableSequence[str]`
+
+The set of character sequences (up to 5) that
+will stop output generation. If specified, the
+API will stop at the first appearance of a stop
+sequence. The stop sequence will not be included
+as part of the response.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateTextResponse.md b/docs/api/google/generativeai/protos/GenerateTextResponse.md
new file mode 100644
index 000000000..16ba38748
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateTextResponse.md
@@ -0,0 +1,78 @@
+description: The response from the model, including candidate completions.
+
+
+
+
+
+
+# google.generativeai.protos.GenerateTextResponse
+
+
+
+
+
+
+
+The response from the model, including candidate completions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+`MutableSequence[google.ai.generativelanguage.TextCompletion]`
+
+Candidate responses from the model.
+ |
+
+
+`filters`
+ |
+
+`MutableSequence[google.ai.generativelanguage.ContentFilter]`
+
+A set of content filtering metadata for the prompt and
+response text.
+
+This indicates which ``SafetyCategory``\ (s) blocked a
+candidate from this response, the lowest ``HarmProbability``
+that triggered a block, and the HarmThreshold setting for
+that category. This indicates the smallest change to the
+``SafetySettings`` that would be necessary to unblock at
+least 1 response.
+
+The blocking is configured by the ``SafetySettings`` in the
+request (or the default ``SafetySettings`` of the API).
+ |
+
+
+`safety_feedback`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetyFeedback]`
+
+Returns any safety feedback related to
+content filtering.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerationConfig.md b/docs/api/google/generativeai/protos/GenerationConfig.md
new file mode 100644
index 000000000..33a0b88bc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerationConfig.md
@@ -0,0 +1,172 @@
+description: Configuration options for model generation and outputs.
+
+
+
+
+
+
+# google.generativeai.protos.GenerationConfig
+
+
+
+
+
+
+
+Configuration options for model generation and outputs.
+
+
+ Not
+all parameters may be configurable for every model.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidate_count`
+ |
+
+`int`
+
+Optional. Number of generated responses to
+return.
+Currently, this value can only be set to 1. If
+unset, this will default to 1.
+
+ |
+
+
+`stop_sequences`
+ |
+
+`MutableSequence[str]`
+
+Optional. The set of character sequences (up
+to 5) that will stop output generation. If
+specified, the API will stop at the first
+appearance of a stop sequence. The stop sequence
+will not be included as part of the response.
+ |
+
+
+`max_output_tokens`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to include in a
+candidate.
+
+Note: The default value varies by model, see the
+Model.output_token_limit attribute of the ``Model``
+returned from the ``getModel`` function.
+
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Note: The default value varies by model, see the
+Model.temperature attribute of the ``Model`` returned
+from the ``getModel`` function.
+
+Values can range from [0.0, 2.0].
+
+ |
+
+
+`top_p`
+ |
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model, see the
+Model.top_p attribute of the ``Model`` returned from the
+``getModel`` function.
+
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+Models use nucleus sampling or combined Top-k and nucleus
+sampling. Top-k sampling considers the set of ``top_k`` most
+probable tokens. Models running with nucleus sampling don't
+allow top_k setting.
+
+Note: The default value varies by model, see the
+Model.top_k attribute of the ``Model`` returned from the
+``getModel`` function. Empty ``top_k`` field in ``Model``
+indicates the model doesn't apply top-k sampling and doesn't
+allow setting ``top_k`` on requests.
+
+ |
+
+
+`response_mime_type`
+ |
+
+`str`
+
+Optional. Output response mimetype of the generated
+candidate text. Supported mimetype: ``text/plain``:
+(default) Text output. ``application/json``: JSON response
+in the candidates.
+ |
+
+
+`response_schema`
+ |
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Output response schema of the generated candidate
+text when response mime type can have schema. Schema can be
+objects, primitives or arrays and is a subset of `OpenAPI
+schema <https://spec.openapis.org/oas/v3.0.3#schema>`__.
+
+If set, a compatible response_mime_type must also be set.
+Compatible mimetypes: ``application/json``: Schema for JSON
+response.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetCachedContentRequest.md b/docs/api/google/generativeai/protos/GetCachedContentRequest.md
new file mode 100644
index 000000000..c9cab78c1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetCachedContentRequest.md
@@ -0,0 +1,49 @@
+description: Request to read CachedContent.
+
+
+
+
+
+
+# google.generativeai.protos.GetCachedContentRequest
+
+
+
+
+
+
+
+Request to read CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name referring to the content cache
+entry. Format: ``cachedContents/{id}``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetChunkRequest.md b/docs/api/google/generativeai/protos/GetChunkRequest.md
new file mode 100644
index 000000000..575de2729
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetChunkRequest.md
@@ -0,0 +1,49 @@
+description: Request for getting information about a specific Chunk.
+
+
+
+
+
+
+# google.generativeai.protos.GetChunkRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``Chunk`` to retrieve. Example:
+``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetCorpusRequest.md b/docs/api/google/generativeai/protos/GetCorpusRequest.md
new file mode 100644
index 000000000..fe4d3092b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetCorpusRequest.md
@@ -0,0 +1,49 @@
+description: Request for getting information about a specific Corpus.
+
+
+
+
+
+
+# google.generativeai.protos.GetCorpusRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``Corpus``. Example:
+``corpora/my-corpus-123``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetDocumentRequest.md b/docs/api/google/generativeai/protos/GetDocumentRequest.md
new file mode 100644
index 000000000..1959ea46c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetDocumentRequest.md
@@ -0,0 +1,49 @@
+description: Request for getting information about a specific Document.
+
+
+
+
+
+
+# google.generativeai.protos.GetDocumentRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``Document`` to retrieve. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetFileRequest.md b/docs/api/google/generativeai/protos/GetFileRequest.md
new file mode 100644
index 000000000..de6b98e50
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetFileRequest.md
@@ -0,0 +1,49 @@
+description: Request for GetFile.
+
+
+
+
+
+
+# google.generativeai.protos.GetFileRequest
+
+
+
+
+
+
+
+Request for ``GetFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``File`` to get. Example:
+``files/abc-123``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetModelRequest.md b/docs/api/google/generativeai/protos/GetModelRequest.md
new file mode 100644
index 000000000..de91a7abe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetModelRequest.md
@@ -0,0 +1,53 @@
+description: Request for getting information about a specific Model.
+
+
+
+
+
+
+# google.generativeai.protos.GetModelRequest
+
+
+
+
+
+
+
+Request for getting information about a specific Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the model.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetPermissionRequest.md b/docs/api/google/generativeai/protos/GetPermissionRequest.md
new file mode 100644
index 000000000..dd6850a87
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetPermissionRequest.md
@@ -0,0 +1,52 @@
+description: Request for getting information about a specific Permission.
+
+
+
+
+
+
+# google.generativeai.protos.GetPermissionRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the permission.
+
+Formats:
+``tunedModels/{tuned_model}/permissions/{permission}``
+``corpora/{corpus}/permissions/{permission}``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetTunedModelRequest.md b/docs/api/google/generativeai/protos/GetTunedModelRequest.md
new file mode 100644
index 000000000..c34e11e1d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetTunedModelRequest.md
@@ -0,0 +1,50 @@
+description: Request for getting information about a specific Model.
+
+
+
+
+
+
+# google.generativeai.protos.GetTunedModelRequest
+
+
+
+
+
+
+
+Request for getting information about a specific Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the model.
+
+Format: ``tunedModels/my-model-id``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingAttribution.md b/docs/api/google/generativeai/protos/GroundingAttribution.md
new file mode 100644
index 000000000..553ea8533
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingAttribution.md
@@ -0,0 +1,59 @@
+description: Attribution for a source that contributed to an answer.
+
+
+
+
+
+
+# google.generativeai.protos.GroundingAttribution
+
+
+
+
+
+
+
+Attribution for a source that contributed to an answer.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`source_id`
+ |
+
+`google.ai.generativelanguage.AttributionSourceId`
+
+Output only. Identifier for the source
+contributing to this attribution.
+ |
+
+
+`content`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Grounding source content that makes up this
+attribution.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingPassage.md b/docs/api/google/generativeai/protos/GroundingPassage.md
new file mode 100644
index 000000000..f63c2e18a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingPassage.md
@@ -0,0 +1,58 @@
+description: Passage included inline with a grounding configuration.
+
+
+
+
+
+
+# google.generativeai.protos.GroundingPassage
+
+
+
+
+
+
+
+Passage included inline with a grounding configuration.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`id`
+ |
+
+`str`
+
+Identifier for the passage for attributing
+this passage in grounded answers.
+ |
+
+
+`content`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Content of the passage.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingPassages.md b/docs/api/google/generativeai/protos/GroundingPassages.md
new file mode 100644
index 000000000..1cde963d3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingPassages.md
@@ -0,0 +1,48 @@
+description: A repeated list of passages.
+
+
+
+
+
+
+# google.generativeai.protos.GroundingPassages
+
+
+
+
+
+
+
+A repeated list of passages.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`passages`
+ |
+
+`MutableSequence[google.ai.generativelanguage.GroundingPassage]`
+
+List of passages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/HarmCategory.md b/docs/api/google/generativeai/protos/HarmCategory.md
new file mode 100644
index 000000000..54f1b3767
--- /dev/null
+++ b/docs/api/google/generativeai/protos/HarmCategory.md
@@ -0,0 +1,822 @@
+description: The category of a rating.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.HarmCategory
+
+
+
+
+
+
+
+The category of a rating.
+
+
+google.generativeai.protos.HarmCategory(
+ *args, **kwds
+)
+
+
+
+
+
+
+These categories cover various kinds of harms that developers
+may wish to adjust.
+
+
+
+
+Values |
+
+
+
+`HARM_CATEGORY_UNSPECIFIED`
+ |
+
+`0`
+
+Category is unspecified.
+ |
+
+
+`HARM_CATEGORY_DEROGATORY`
+ |
+
+`1`
+
+Negative or harmful comments targeting
+identity and/or protected attribute.
+ |
+
+
+`HARM_CATEGORY_TOXICITY`
+ |
+
+`2`
+
+Content that is rude, disrespectful, or
+profane.
+ |
+
+
+`HARM_CATEGORY_VIOLENCE`
+ |
+
+`3`
+
+Describes scenarios depicting violence
+against an individual or group, or general
+descriptions of gore.
+ |
+
+
+`HARM_CATEGORY_SEXUAL`
+ |
+
+`4`
+
+Contains references to sexual acts or other
+lewd content.
+ |
+
+
+`HARM_CATEGORY_MEDICAL`
+ |
+
+`5`
+
+Promotes unchecked medical advice.
+ |
+
+
+`HARM_CATEGORY_DANGEROUS`
+ |
+
+`6`
+
+Dangerous content that promotes, facilitates,
+or encourages harmful acts.
+ |
+
+
+`HARM_CATEGORY_HARASSMENT`
+ |
+
+`7`
+
+Harassment content.
+ |
+
+
+`HARM_CATEGORY_HATE_SPEECH`
+ |
+
+`8`
+
+Hate speech and content.
+ |
+
+
+`HARM_CATEGORY_SEXUALLY_EXPLICIT`
+ |
+
+`9`
+
+Sexually explicit content.
+ |
+
+
+`HARM_CATEGORY_DANGEROUS_CONTENT`
+ |
+
+`10`
+
+Dangerous content.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+HARM_CATEGORY_DANGEROUS
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_DANGEROUS_CONTENT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_DEROGATORY
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_HARASSMENT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_HATE_SPEECH
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_MEDICAL
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_SEXUAL
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_SEXUALLY_EXPLICIT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_TOXICITY
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_UNSPECIFIED
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_VIOLENCE
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Hyperparameters.md b/docs/api/google/generativeai/protos/Hyperparameters.md
new file mode 100644
index 000000000..f5e17d6ee
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Hyperparameters.md
@@ -0,0 +1,100 @@
+description: Hyperparameters controlling the tuning process.
+
+
+
+
+
+
+# google.generativeai.protos.Hyperparameters
+
+
+
+
+
+
+
+Hyperparameters controlling the tuning process.
+
+
+ Read more at
+https://ai.google.dev/docs/model_tuning_guidance
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`learning_rate`
+ |
+
+`float`
+
+Optional. Immutable. The learning rate
+hyperparameter for tuning. If not set, a default
+of 0.001 or 0.0002 will be calculated based on
+the number of training examples.
+
+This field is a member of `oneof`_ ``learning_rate_option``.
+ |
+
+
+`learning_rate_multiplier`
+ |
+
+`float`
+
+Optional. Immutable. The learning rate multiplier is used to
+calculate a final learning_rate based on the default
+(recommended) value. Actual learning rate :=
+learning_rate_multiplier \* default learning rate Default
+learning rate is dependent on base model and dataset size.
+If not set, a default of 1.0 will be used.
+
+This field is a member of `oneof`_ ``learning_rate_option``.
+ |
+
+
+`epoch_count`
+ |
+
+`int`
+
+Immutable. The number of training epochs. An
+epoch is one pass through the training data. If
+not set, a default of 5 will be used.
+
+ |
+
+
+`batch_size`
+ |
+
+`int`
+
+Immutable. The batch size hyperparameter for
+tuning. If not set, a default of 4 or 16 will be
+used based on the number of training examples.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsRequest.md b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
new file mode 100644
index 000000000..9d6da05b7
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
@@ -0,0 +1,68 @@
+description: Request to list CachedContents.
+
+
+
+
+
+
+# google.generativeai.protos.ListCachedContentsRequest
+
+
+
+
+
+
+
+Request to list CachedContents.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of cached
+contents to return. The service may return fewer
+than this value. If unspecified, some default
+(under maximum) number of items will be
+returned. The maximum value is 1000; values
+above 1000 will be coerced to 1000.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListCachedContents`` call. Provide this to retrieve the
+subsequent page.
+
+When paginating, all other parameters provided to
+``ListCachedContents`` must match the call that provided the
+page token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsResponse.md b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
new file mode 100644
index 000000000..ba3e10e69
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
@@ -0,0 +1,59 @@
+description: Response with CachedContents list.
+
+
+
+
+
+
+# google.generativeai.protos.ListCachedContentsResponse
+
+
+
+
+
+
+
+Response with CachedContents list.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`cached_contents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CachedContent]`
+
+List of cached contents.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no subsequent
+pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListChunksRequest.md b/docs/api/google/generativeai/protos/ListChunksRequest.md
new file mode 100644
index 000000000..2150c0630
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListChunksRequest.md
@@ -0,0 +1,80 @@
+description: Request for listing Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.ListChunksRequest
+
+
+
+
+
+
+
+Request for listing ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The name of the ``Document`` containing
+``Chunk``\ s. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return (per
+page). The service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum size limit is 100 ``Chunk``\ s per page.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListChunks`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListChunks`` must match the call that provided the page
+token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListChunksResponse.md b/docs/api/google/generativeai/protos/ListChunksResponse.md
new file mode 100644
index 000000000..38178a68e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListChunksResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListChunks containing a paginated list of Chunk\ s.
+
+
+
+
+
+
+# google.generativeai.protos.ListChunksResponse
+
+
+
+
+
+
+
+Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
+
+
+ The ``Chunk``\ s are sorted by ascending
+``chunk.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`chunks`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+The returned ``Chunk``\ s.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCorporaRequest.md b/docs/api/google/generativeai/protos/ListCorporaRequest.md
new file mode 100644
index 000000000..d6de5ef51
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCorporaRequest.md
@@ -0,0 +1,69 @@
+description: Request for listing Corpora.
+
+
+
+
+
+
+# google.generativeai.protos.ListCorporaRequest
+
+
+
+
+
+
+
+Request for listing ``Corpora``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of ``Corpora`` to return (per
+page). The service may return fewer ``Corpora``.
+
+If unspecified, at most 10 ``Corpora`` will be returned. The
+maximum size limit is 20 ``Corpora`` per page.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListCorpora`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListCorpora`` must match the call that provided the page
+token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCorporaResponse.md b/docs/api/google/generativeai/protos/ListCorporaResponse.md
new file mode 100644
index 000000000..8b58310d4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCorporaResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListCorpora containing a paginated list of Corpora.
+
+
+
+
+
+
+# google.generativeai.protos.ListCorporaResponse
+
+
+
+
+
+
+
+Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
+
+
+ The results are sorted by ascending
+``corpus.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`corpora`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Corpus]`
+
+The returned corpora.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListDocumentsRequest.md b/docs/api/google/generativeai/protos/ListDocumentsRequest.md
new file mode 100644
index 000000000..ce628dfef
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListDocumentsRequest.md
@@ -0,0 +1,79 @@
+description: Request for listing Document\ s.
+
+
+
+
+
+
+# google.generativeai.protos.ListDocumentsRequest
+
+
+
+
+
+
+
+Request for listing ``Document``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The name of the ``Corpus`` containing
+``Document``\ s. Example: ``corpora/my-corpus-123``
+ |
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of ``Document``\ s to return
+(per page). The service may return fewer ``Document``\ s.
+
+If unspecified, at most 10 ``Document``\ s will be returned.
+The maximum size limit is 20 ``Document``\ s per page.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListDocuments`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListDocuments`` must match the call that provided the page
+token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListDocumentsResponse.md b/docs/api/google/generativeai/protos/ListDocumentsResponse.md
new file mode 100644
index 000000000..7d4f7b4c8
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListDocumentsResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListDocuments containing a paginated list of Document\ s.
+
+
+
+
+
+
+# google.generativeai.protos.ListDocumentsResponse
+
+
+
+
+
+
+
+Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
+
+
+ The ``Document``\ s are sorted by ascending
+``document.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+`documents`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Document]`
+
+The returned ``Document``\ s.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListFilesRequest.md b/docs/api/google/generativeai/protos/ListFilesRequest.md
new file mode 100644
index 000000000..5b4e3e08e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListFilesRequest.md
@@ -0,0 +1,59 @@
+description: Request for ListFiles.
+
+
+
+
+
+
+# google.generativeai.protos.ListFilesRequest
+
+
+
+
+
+
+
+Request for ``ListFiles``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. Maximum number of ``File``\ s to return per page.
+If unspecified, defaults to 10. Maximum ``page_size`` is
+100.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token from a previous ``ListFiles`` call.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListFilesResponse.md b/docs/api/google/generativeai/protos/ListFilesResponse.md
new file mode 100644
index 000000000..3f1045ebe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListFilesResponse.md
@@ -0,0 +1,58 @@
+description: Response for ListFiles.
+
+
+
+
+
+
+# google.generativeai.protos.ListFilesResponse
+
+
+
+
+
+
+
+Response for ``ListFiles``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`files`
+ |
+
+`MutableSequence[google.ai.generativelanguage.File]`
+
+The list of ``File``\ s.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token that can be sent as a ``page_token`` into a
+subsequent ``ListFiles`` call.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListModelsRequest.md b/docs/api/google/generativeai/protos/ListModelsRequest.md
new file mode 100644
index 000000000..50095c67b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListModelsRequest.md
@@ -0,0 +1,69 @@
+description: Request for listing all Models.
+
+
+
+
+
+
+# google.generativeai.protos.ListModelsRequest
+
+
+
+
+
+
+
+Request for listing all Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`page_size`
+ |
+
+`int`
+
+The maximum number of ``Models`` to return (per page).
+
+The service may return fewer models. If unspecified, at most
+50 models will be returned per page. This method returns at
+most 1000 models per page, even if you pass a larger
+page_size.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+A page token, received from a previous ``ListModels`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListModels`` must match the call that provided the page
+token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListModelsResponse.md b/docs/api/google/generativeai/protos/ListModelsResponse.md
new file mode 100644
index 000000000..062d45370
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListModelsResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListModel containing a paginated list of Models.
+
+
+
+
+
+
+# google.generativeai.protos.ListModelsResponse
+
+
+
+
+
+
+
+Response from ``ListModel`` containing a paginated list of Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`models`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Model]`
+
+The returned Models.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListPermissionsRequest.md b/docs/api/google/generativeai/protos/ListPermissionsRequest.md
new file mode 100644
index 000000000..1b25c7132
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListPermissionsRequest.md
@@ -0,0 +1,80 @@
+description: Request for listing permissions.
+
+
+
+
+
+
+# google.generativeai.protos.ListPermissionsRequest
+
+
+
+
+
+
+
+Request for listing permissions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+`str`
+
+Required. The parent resource of the permissions. Formats:
+``tunedModels/{tuned_model}`` ``corpora/{corpus}``
+ |
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of ``Permission``\ s to return
+(per page). The service may return fewer permissions.
+
+If unspecified, at most 10 permissions will be returned.
+This method returns at most 1000 permissions per page, even
+if you pass larger page_size.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListPermissions`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListPermissions`` must match the call that provided the
+page token.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListPermissionsResponse.md b/docs/api/google/generativeai/protos/ListPermissionsResponse.md
new file mode 100644
index 000000000..f6e0ace5c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListPermissionsResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListPermissions containing a paginated list of permissions.
+
+
+
+
+
+
+# google.generativeai.protos.ListPermissionsResponse
+
+
+
+
+
+
+
+Response from ``ListPermissions`` containing a paginated list of permissions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`permissions`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Permission]`
+
+Returned permissions.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsRequest.md b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
new file mode 100644
index 000000000..f8ac44e25
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
@@ -0,0 +1,97 @@
+description: Request for listing TunedModels.
+
+
+
+
+
+
+# google.generativeai.protos.ListTunedModelsRequest
+
+
+
+
+
+
+
+Request for listing TunedModels.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`page_size`
+ |
+
+`int`
+
+Optional. The maximum number of ``TunedModels`` to return
+(per page). The service may return fewer tuned models.
+
+If unspecified, at most 10 tuned models will be returned.
+This method returns at most 1000 models per page, even if
+you pass a larger page_size.
+ |
+
+
+`page_token`
+ |
+
+`str`
+
+Optional. A page token, received from a previous
+``ListTunedModels`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListTunedModels`` must match the call that provided the
+page token.
+ |
+
+
+`filter`
+ |
+
+`str`
+
+Optional. A filter is a full text search over
+the tuned model's description and display name.
+By default, results will not include tuned
+models shared with everyone.
+
+Additional operators:
+
+ - owner:me
+ - writers:me
+ - readers:me
+ - readers:everyone
+
+Examples:
+
+ "owner:me" returns all tuned models to which
+caller has owner role "readers:me" returns all
+tuned models to which caller has reader role
+"readers:everyone" returns all tuned models that
+are shared with everyone
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsResponse.md b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
new file mode 100644
index 000000000..0f247c4dc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
@@ -0,0 +1,60 @@
+description: Response from ListTunedModels containing a paginated list of Models.
+
+
+
+
+
+
+# google.generativeai.protos.ListTunedModelsResponse
+
+
+
+
+
+
+
+Response from ``ListTunedModels`` containing a paginated list of Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_models`
+ |
+
+`MutableSequence[google.ai.generativelanguage.TunedModel]`
+
+The returned Models.
+ |
+
+
+`next_page_token`
+ |
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Message.md b/docs/api/google/generativeai/protos/Message.md
new file mode 100644
index 000000000..94d7d17af
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Message.md
@@ -0,0 +1,86 @@
+description: The base unit of structured text.
+
+
+
+
+
+
+# google.generativeai.protos.Message
+
+
+
+
+
+
+
+The base unit of structured text.
+
+
+
+A ``Message`` includes an ``author`` and the ``content`` of the
+``Message``.
+
+The ``author`` is used to tag messages when they are fed to the
+model as text.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`author`
+ |
+
+`str`
+
+Optional. The author of this Message.
+
+This serves as a key for tagging
+the content of this Message when it is fed to
+the model as text.
+
+The author can be any alphanumeric string.
+ |
+
+
+`content`
+ |
+
+`str`
+
+Required. The text content of the structured ``Message``.
+ |
+
+
+`citation_metadata`
+ |
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+``content`` in this ``Message``.
+
+If this ``Message`` was generated as output from the model,
+this field may be populated with attribution information for
+any text included in the ``content``. This field is used
+only on output.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/MessagePrompt.md b/docs/api/google/generativeai/protos/MessagePrompt.md
new file mode 100644
index 000000000..2386e5bb5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/MessagePrompt.md
@@ -0,0 +1,103 @@
+description: All of the structured input text passed to the model as a prompt.
+
+
+
+
+
+
+# google.generativeai.protos.MessagePrompt
+
+
+
+
+
+
+
+All of the structured input text passed to the model as a prompt.
+
+
+
+A ``MessagePrompt`` contains a structured set of fields that provide
+context for the conversation, examples of user input/model output
+message pairs that prime the model to respond in different ways, and
+the conversation history or list of messages representing the
+alternating turns of the conversation between the user and the
+model.
+
+
+
+
+
+
+Attributes |
+
+
+
+`context`
+ |
+
+`str`
+
+Optional. Text that should be provided to the model first to
+ground the response.
+
+If not empty, this ``context`` will be given to the model
+first before the ``examples`` and ``messages``. When using a
+``context`` be sure to provide it with every request to
+maintain continuity.
+
+This field can be a description of your prompt to the model
+to help provide context and guide the responses. Examples:
+"Translate the phrase from English to French." or "Given a
+statement, classify the sentiment as happy, sad or neutral."
+
+Anything included in this field will take precedence over
+message history if the total input size exceeds the model's
+``input_token_limit`` and the input request is truncated.
+ |
+
+
+`examples`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Example]`
+
+Optional. Examples of what the model should generate.
+
+This includes both user input and the response that the
+model should emulate.
+
+These ``examples`` are treated identically to conversation
+messages except that they take precedence over the history
+in ``messages``: If the total input size exceeds the model's
+``input_token_limit`` the input will be truncated. Items
+will be dropped from ``messages`` before ``examples``.
+ |
+
+
+`messages`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+Required. A snapshot of the recent conversation history
+sorted chronologically.
+
+Turns alternate between two authors.
+
+If the total input size exceeds the model's
+``input_token_limit`` the input will be truncated: The
+oldest items will be dropped from ``messages``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/MetadataFilter.md b/docs/api/google/generativeai/protos/MetadataFilter.md
new file mode 100644
index 000000000..447a76665
--- /dev/null
+++ b/docs/api/google/generativeai/protos/MetadataFilter.md
@@ -0,0 +1,63 @@
+description: User provided filter to limit retrieval based on Chunk or Document level metadata values.
+
+
+
+
+
+
+# google.generativeai.protos.MetadataFilter
+
+
+
+
+
+
+
+User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
+
+
+ Example (genre = drama OR genre
+= action): key = "document.custom_metadata.genre" conditions =
+[{string_value = "drama", operation = EQUAL}, {string_value =
+"action", operation = EQUAL}]
+
+
+
+
+
+
+Attributes |
+
+
+
+`key`
+ |
+
+`str`
+
+Required. The key of the metadata to filter
+on.
+ |
+
+
+`conditions`
+ |
+
+`MutableSequence[google.ai.generativelanguage.Condition]`
+
+Required. The ``Condition``\ s for the given key that will
+trigger this filter. Multiple ``Condition``\ s are joined by
+logical ORs.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Model.md b/docs/api/google/generativeai/protos/Model.md
new file mode 100644
index 000000000..c4105e24e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Model.md
@@ -0,0 +1,193 @@
+description: Information about a Generative Language Model.
+
+
+
+
+
+
+# google.generativeai.protos.Model
+
+
+
+
+
+
+
+Information about a Generative Language Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the ``Model``.
+
+Format: ``models/{model}`` with a ``{model}`` naming
+convention of:
+
+- "{base_model_id}-{version}"
+
+Examples:
+
+- ``models/chat-bison-001``
+ |
+
+
+`base_model_id`
+ |
+
+`str`
+
+Required. The name of the base model, pass this to the
+generation request.
+
+Examples:
+
+- ``chat-bison``
+ |
+
+
+`version`
+ |
+
+`str`
+
+Required. The version number of the model.
+
+This represents the major version
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+The human-readable name of the model. E.g.
+"Chat Bison".
+The name can be up to 128 characters long and
+can consist of any UTF-8 characters.
+ |
+
+
+`description`
+ |
+
+`str`
+
+A short description of the model.
+ |
+
+
+`input_token_limit`
+ |
+
+`int`
+
+Maximum number of input tokens allowed for
+this model.
+ |
+
+
+`output_token_limit`
+ |
+
+`int`
+
+Maximum number of output tokens available for
+this model.
+ |
+
+
+`supported_generation_methods`
+ |
+
+`MutableSequence[str]`
+
+The model's supported generation methods.
+
+The method names are defined as Pascal case strings, such as
+``generateMessage`` which correspond to API methods.
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Controls the randomness of the output.
+
+Values can range over ``[0.0,max_temperature]``, inclusive.
+A higher value will produce responses that are more varied,
+while a value closer to ``0.0`` will typically result in
+less surprising responses from the model. This value
+specifies default to be used by the backend while making the
+call to the model.
+
+ |
+
+
+`max_temperature`
+ |
+
+`float`
+
+The maximum temperature this model can use.
+
+ |
+
+
+`top_p`
+ |
+
+`float`
+
+For Nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``. This value specifies
+default to be used by the backend while making the call to
+the model.
+
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+For Top-k sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. This value specifies default to be used by the
+backend while making the call to the model. If empty,
+indicates the model doesn't use top-k sampling, and
+``top_k`` isn't allowed as a generation parameter.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Part.md b/docs/api/google/generativeai/protos/Part.md
new file mode 100644
index 000000000..24e8ba0a6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Part.md
@@ -0,0 +1,136 @@
+description: A datatype containing media that is part of a multi-part Content message.
+
+
+
+
+
+
+# google.generativeai.protos.Part
+
+
+
+
+
+
+
+A datatype containing media that is part of a multi-part ``Content`` message.
+
+
+
+A ``Part`` consists of data which has an associated datatype. A
+``Part`` can only contain one of the accepted types in
+``Part.data``.
+
+A ``Part`` must have a fixed IANA MIME type identifying the type and
+subtype of the media if the ``inline_data`` field is filled with raw
+bytes.
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`text`
+ |
+
+`str`
+
+Inline text.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`inline_data`
+ |
+
+`google.ai.generativelanguage.Blob`
+
+Inline media bytes.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`function_call`
+ |
+
+`google.ai.generativelanguage.FunctionCall`
+
+A predicted ``FunctionCall`` returned from the model that
+contains a string representing the
+FunctionDeclaration.name with the arguments and their
+values.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`function_response`
+ |
+
+`google.ai.generativelanguage.FunctionResponse`
+
+The result output of a ``FunctionCall`` that contains a
+string representing the FunctionDeclaration.name and a
+structured JSON object containing any output from the
+function is used as context to the model.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`file_data`
+ |
+
+`google.ai.generativelanguage.FileData`
+
+URI based data.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`executable_code`
+ |
+
+`google.ai.generativelanguage.ExecutableCode`
+
+Code generated by the model that is meant to
+be executed.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+`code_execution_result`
+ |
+
+`google.ai.generativelanguage.CodeExecutionResult`
+
+Result of executing the ``ExecutableCode``.
+
+This field is a member of `oneof`_ ``data``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Permission.md b/docs/api/google/generativeai/protos/Permission.md
new file mode 100644
index 000000000..de4165fcb
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission.md
@@ -0,0 +1,110 @@
+description: Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g.
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Permission
+
+
+
+
+
+
+
+Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g.
+
+
+ a tuned model,
+corpus).
+
+A role is a collection of permitted operations that allows users
+to perform specific actions on PaLM API resources. To make them
+available to users, groups, or service accounts, you assign
+roles. When you assign a role, you grant permissions that the
+role contains.
+
+There are three concentric roles. Each role is a superset of the
+previous role's permitted operations:
+
+- reader can use the resource (e.g. tuned model, corpus) for
+ inference
+- writer has reader's permissions and additionally can edit and
+ share
+- owner has writer's permissions and additionally can delete
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Output only. Identifier. The permission name. A unique name
+will be generated on create. Examples:
+tunedModels/{tuned_model}/permissions/{permission}
+corpora/{corpus}/permissions/{permission} Output only.
+ |
+
+
+`grantee_type`
+ |
+
+`google.ai.generativelanguage.Permission.GranteeType`
+
+Optional. Immutable. The type of the grantee.
+
+ |
+
+
+`email_address`
+ |
+
+`str`
+
+Optional. Immutable. The email address of the
+user of group which this permission refers.
+Field is not set when permission's grantee type
+is EVERYONE.
+
+ |
+
+
+`role`
+ |
+
+`google.ai.generativelanguage.Permission.Role`
+
+Required. The role granted by this
+permission.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class GranteeType`](../../../google/generativeai/protos/Permission/GranteeType.md)
+
+[`class Role`](../../../google/generativeai/protos/Permission/Role.md)
+
diff --git a/docs/api/google/generativeai/protos/Permission/GranteeType.md b/docs/api/google/generativeai/protos/Permission/GranteeType.md
new file mode 100644
index 000000000..ef0d1f378
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission/GranteeType.md
@@ -0,0 +1,698 @@
+description: Defines types of the grantee of this permission.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Permission.GranteeType
+
+
+
+
+
+
+
+Defines types of the grantee of this permission.
+
+
+google.generativeai.protos.Permission.GranteeType(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`GRANTEE_TYPE_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is unused.
+ |
+
+
+`USER`
+ |
+
+`1`
+
+Represents a user. When set, you must provide email_address
+for the user.
+ |
+
+
+`GROUP`
+ |
+
+`2`
+
+Represents a group. When set, you must provide email_address
+for the group.
+ |
+
+
+`EVERYONE`
+ |
+
+`3`
+
+Represents access to everyone. No extra
+information is required.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+EVERYONE
+ |
+
+``
+ |
+
+
+GRANTEE_TYPE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+GROUP
+ |
+
+``
+ |
+
+
+USER
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Permission/Role.md b/docs/api/google/generativeai/protos/Permission/Role.md
new file mode 100644
index 000000000..a59665a55
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission/Role.md
@@ -0,0 +1,697 @@
+description: Defines the role granted by this permission.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Permission.Role
+
+
+
+
+
+
+
+Defines the role granted by this permission.
+
+
+google.generativeai.protos.Permission.Role(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`ROLE_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is unused.
+ |
+
+
+`OWNER`
+ |
+
+`1`
+
+Owner can use, update, share and delete the
+resource.
+ |
+
+
+`WRITER`
+ |
+
+`2`
+
+Writer can use, update and share the
+resource.
+ |
+
+
+`READER`
+ |
+
+`3`
+
+Reader can use the resource.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+OWNER
+ |
+
+``
+ |
+
+
+READER
+ |
+
+``
+ |
+
+
+ROLE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+WRITER
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryCorpusRequest.md b/docs/api/google/generativeai/protos/QueryCorpusRequest.md
new file mode 100644
index 000000000..60d839b1f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryCorpusRequest.md
@@ -0,0 +1,109 @@
+description: Request for querying a Corpus.
+
+
+
+
+
+
+# google.generativeai.protos.QueryCorpusRequest
+
+
+
+
+
+
+
+Request for querying a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``Corpus`` to query. Example:
+``corpora/my-corpus-123``
+ |
+
+
+`query`
+ |
+
+`str`
+
+Required. Query string to perform semantic
+search.
+ |
+
+
+`metadata_filters`
+ |
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filter for ``Chunk`` and ``Document`` metadata.
+Each ``MetadataFilter`` object should correspond to a unique
+key. Multiple ``MetadataFilter`` objects are joined by
+logical "AND"s.
+
+Example query at document level: (year >= 2020 OR year <
+2010) AND (genre = drama OR genre = action)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"document.custom_metadata.year" conditions = [{int_value =
+2020, operation = GREATER_EQUAL}, {int_value = 2010,
+operation = LESS}]}, {key = "document.custom_metadata.year"
+conditions = [{int_value = 2020, operation = GREATER_EQUAL},
+{int_value = 2010, operation = LESS}]}, {key =
+"document.custom_metadata.genre" conditions = [{string_value
+= "drama", operation = EQUAL}, {string_value = "action",
+operation = EQUAL}]}]
+
+Example query at chunk level for a numeric range of values:
+(year > 2015 AND year <= 2020)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2015, operation = GREATER}]}, {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = LESS_EQUAL}]}]
+
+Note: "AND"s for the same key are only supported for numeric
+values. String values only support "OR"s for the same key.
+ |
+
+
+`results_count`
+ |
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return. The
+service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum specified result count is 100.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryCorpusResponse.md b/docs/api/google/generativeai/protos/QueryCorpusResponse.md
new file mode 100644
index 000000000..cccf776a6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryCorpusResponse.md
@@ -0,0 +1,48 @@
+description: Response from QueryCorpus containing a list of relevant chunks.
+
+
+
+
+
+
+# google.generativeai.protos.QueryCorpusResponse
+
+
+
+
+
+
+
+Response from ``QueryCorpus`` containing a list of relevant chunks.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`relevant_chunks`
+ |
+
+`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
+
+The relevant chunks.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryDocumentRequest.md b/docs/api/google/generativeai/protos/QueryDocumentRequest.md
new file mode 100644
index 000000000..c31688fff
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryDocumentRequest.md
@@ -0,0 +1,109 @@
+description: Request for querying a Document.
+
+
+
+
+
+
+# google.generativeai.protos.QueryDocumentRequest
+
+
+
+
+
+
+
+Request for querying a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The name of the ``Document`` to query. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+ |
+
+
+`query`
+ |
+
+`str`
+
+Required. Query string to perform semantic
+search.
+ |
+
+
+`results_count`
+ |
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return. The
+service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum specified result count is 100.
+ |
+
+
+`metadata_filters`
+ |
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filter for ``Chunk`` metadata. Each
+``MetadataFilter`` object should correspond to a unique key.
+Multiple ``MetadataFilter`` objects are joined by logical
+"AND"s.
+
+Note: ``Document``-level filtering is not supported for this
+request because a ``Document`` name is already specified.
+
+Example query: (year >= 2020 OR year < 2010) AND (genre =
+drama OR genre = action)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = GREATER_EQUAL}, {int_value = 2010,
+operation = LESS}]}, {key = "chunk.custom_metadata.genre"
+conditions = [{string_value = "drama", operation = EQUAL},
+{string_value = "action", operation = EQUAL}]}]
+
+Example query for a numeric range of values: (year > 2015
+AND year <= 2020)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2015, operation = GREATER}]}, {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = LESS_EQUAL}]}]
+
+Note: "AND"s for the same key are only supported for numeric
+values. String values only support "OR"s for the same key.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryDocumentResponse.md b/docs/api/google/generativeai/protos/QueryDocumentResponse.md
new file mode 100644
index 000000000..aabff9d01
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryDocumentResponse.md
@@ -0,0 +1,48 @@
+description: Response from QueryDocument containing a list of relevant chunks.
+
+
+
+
+
+
+# google.generativeai.protos.QueryDocumentResponse
+
+
+
+
+
+
+
+Response from ``QueryDocument`` containing a list of relevant chunks.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`relevant_chunks`
+ |
+
+`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
+
+The returned relevant chunks.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/RelevantChunk.md b/docs/api/google/generativeai/protos/RelevantChunk.md
new file mode 100644
index 000000000..c9a7f7036
--- /dev/null
+++ b/docs/api/google/generativeai/protos/RelevantChunk.md
@@ -0,0 +1,57 @@
+description: The information for a chunk relevant to a query.
+
+
+
+
+
+
+# google.generativeai.protos.RelevantChunk
+
+
+
+
+
+
+
+The information for a chunk relevant to a query.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`chunk_relevance_score`
+ |
+
+`float`
+
+``Chunk`` relevance to the query.
+ |
+
+
+`chunk`
+ |
+
+`google.ai.generativelanguage.Chunk`
+
+``Chunk`` associated with the query.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SafetyFeedback.md b/docs/api/google/generativeai/protos/SafetyFeedback.md
new file mode 100644
index 000000000..6d7df4664
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetyFeedback.md
@@ -0,0 +1,63 @@
+description: Safety feedback for an entire request.
+
+
+
+
+
+
+# google.generativeai.protos.SafetyFeedback
+
+
+
+
+
+
+
+Safety feedback for an entire request.
+
+
+
+This field is populated if content in the input and/or response
+is blocked due to safety settings. SafetyFeedback may not exist
+for every HarmCategory. Each SafetyFeedback will return the
+safety settings used by the request as well as the lowest
+HarmProbability that should be allowed in order to return a
+result.
+
+
+
+
+
+
+Attributes |
+
+
+
+`rating`
+ |
+
+`google.ai.generativelanguage.SafetyRating`
+
+Safety rating evaluated from content.
+ |
+
+
+`setting`
+ |
+
+`google.ai.generativelanguage.SafetySetting`
+
+Safety settings applied to the request.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SafetyRating.md b/docs/api/google/generativeai/protos/SafetyRating.md
new file mode 100644
index 000000000..b53dda402
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetyRating.md
@@ -0,0 +1,77 @@
+description: Safety rating for a piece of content.
+
+
+
+
+
+
+
+# google.generativeai.protos.SafetyRating
+
+
+
+
+
+
+
+Safety rating for a piece of content.
+
+
+
+The safety rating contains the category of harm and the harm
+probability level in that category for a piece of content.
+Content is classified for safety across a number of harm
+categories and the probability of the harm classification is
+included here.
+
+
+
+
+
+
+Attributes |
+
+
+
+`category`
+ |
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this rating.
+ |
+
+
+`probability`
+ |
+
+`google.ai.generativelanguage.SafetyRating.HarmProbability`
+
+Required. The probability of harm for this
+content.
+ |
+
+
+`blocked`
+ |
+
+`bool`
+
+Was this content blocked because of this
+rating?
+ |
+
+
+
+
+
+## Child Classes
+[`class HarmProbability`](../../../google/generativeai/types/HarmProbability.md)
+
diff --git a/docs/api/google/generativeai/protos/SafetySetting.md b/docs/api/google/generativeai/protos/SafetySetting.md
new file mode 100644
index 000000000..5806267ee
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetySetting.md
@@ -0,0 +1,64 @@
+description: Safety setting, affecting the safety-blocking behavior.
+
+
+
+
+
+
+
+# google.generativeai.protos.SafetySetting
+
+
+
+
+
+
+
+Safety setting, affecting the safety-blocking behavior.
+
+
+
+Passing a safety setting for a category changes the allowed
+probability that content is blocked.
+
+
+
+
+
+
+Attributes |
+
+
+
+`category`
+ |
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this setting.
+ |
+
+
+`threshold`
+ |
+
+`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
+
+Required. Controls the probability threshold
+at which harm is blocked.
+ |
+
+
+
+
+
+## Child Classes
+[`class HarmBlockThreshold`](../../../google/generativeai/types/HarmBlockThreshold.md)
+
diff --git a/docs/api/google/generativeai/protos/Schema.md b/docs/api/google/generativeai/protos/Schema.md
new file mode 100644
index 000000000..d23c8e7a1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Schema.md
@@ -0,0 +1,132 @@
+description: The Schema object allows the definition of input and output data types.
+
+
+
+
+
+
+
+# google.generativeai.protos.Schema
+
+
+
+
+
+
+
+The ``Schema`` object allows the definition of input and output data types.
+
+
+ These types can be objects, but also primitives and arrays.
+Represents a select subset of an `OpenAPI 3.0 schema
+object <https://spec.openapis.org/oas/v3.0.3#schema>`__.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`type_`
+ |
+
+`google.ai.generativelanguage.Type`
+
+Required. Data type.
+ |
+
+
+`format_`
+ |
+
+`str`
+
+Optional. The format of the data. This is
+used only for primitive datatypes. Supported
+formats:
+
+ for NUMBER type: float, double
+ for INTEGER type: int32, int64
+ |
+
+
+`description`
+ |
+
+`str`
+
+Optional. A brief description of the
+parameter. This could contain examples of use.
+Parameter description may be formatted as
+Markdown.
+ |
+
+
+`nullable`
+ |
+
+`bool`
+
+Optional. Indicates if the value may be null.
+ |
+
+
+`enum`
+ |
+
+`MutableSequence[str]`
+
+Optional. Possible values of the element of Type.STRING with
+enum format. For example we can define an Enum Direction as
+: {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH",
+"WEST"]}
+ |
+
+
+`items`
+ |
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Schema of the elements of
+Type.ARRAY.
+
+ |
+
+
+`properties`
+ |
+
+`MutableMapping[str, google.ai.generativelanguage.Schema]`
+
+Optional. Properties of Type.OBJECT.
+ |
+
+
+`required`
+ |
+
+`MutableSequence[str]`
+
+Optional. Required properties of Type.OBJECT.
+ |
+
+
+
+
+
+## Child Classes
+[`class PropertiesEntry`](../../../google/generativeai/protos/Schema/PropertiesEntry.md)
+
diff --git a/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
new file mode 100644
index 000000000..03df0d63c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
@@ -0,0 +1,89 @@
+description: The abstract base class for a message.
+
+
+
+
+
+
+# google.generativeai.protos.Schema.PropertiesEntry
+
+
+
+
+
+
+
+The abstract base class for a message.
+
+
+
+
+
+
+
+Args |
+
+
+mapping (Union[dict, ~.Message]): A dictionary or message to be
+used to determine the values for this message.
+ |
+
+
+
+`ignore_unknown_fields`
+ |
+
+`Optional(bool)`
+
+If True, do not raise errors for
+ unknown fields. Only applied if `mapping` is a mapping type or there
+ are keyword parameters.
+ |
+
+
+`kwargs`
+ |
+
+`dict`
+
+Keys and values corresponding to the fields of the
+ message.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`key`
+ |
+
+`string key`
+ |
+
+
+`value`
+ |
+
+`Schema value`
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
new file mode 100644
index 000000000..1d930b0ec
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
@@ -0,0 +1,92 @@
+description: Configuration for retrieving grounding content from a Corpus or Document created using the Semantic Retriever API.
+
+
+
+
+
+
+# google.generativeai.protos.SemanticRetrieverConfig
+
+
+
+
+
+
+
+Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`source`
+ |
+
+`str`
+
+Required. Name of the resource for retrieval,
+e.g. corpora/123 or corpora/123/documents/abc.
+ |
+
+
+`query`
+ |
+
+`google.ai.generativelanguage.Content`
+
+Required. Query to use for similarity matching ``Chunk``\ s
+in the given resource.
+ |
+
+
+`metadata_filters`
+ |
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filters for selecting ``Document``\ s and/or
+``Chunk``\ s from the resource.
+ |
+
+
+`max_chunks_count`
+ |
+
+`int`
+
+Optional. Maximum number of relevant ``Chunk``\ s to
+retrieve.
+
+ |
+
+
+`minimum_relevance_score`
+ |
+
+`float`
+
+Optional. Minimum relevance score for retrieved relevant
+``Chunk``\ s.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/StringList.md b/docs/api/google/generativeai/protos/StringList.md
new file mode 100644
index 000000000..d36980c9d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/StringList.md
@@ -0,0 +1,48 @@
+description: User provided string values assigned to a single metadata key.
+
+
+
+
+
+
+# google.generativeai.protos.StringList
+
+
+
+
+
+
+
+User provided string values assigned to a single metadata key.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`values`
+ |
+
+`MutableSequence[str]`
+
+The string values of the metadata to store.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TaskType.md b/docs/api/google/generativeai/protos/TaskType.md
new file mode 100644
index 000000000..159d41c7a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TaskType.md
@@ -0,0 +1,771 @@
+description: Type of task for which the embedding will be used.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.TaskType
+
+
+
+
+
+
+
+Type of task for which the embedding will be used.
+
+
+google.generativeai.protos.TaskType(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`TASK_TYPE_UNSPECIFIED`
+ |
+
+`0`
+
+Unset value, which will default to one of the
+other enum values.
+ |
+
+
+`RETRIEVAL_QUERY`
+ |
+
+`1`
+
+Specifies the given text is a query in a
+search/retrieval setting.
+ |
+
+
+`RETRIEVAL_DOCUMENT`
+ |
+
+`2`
+
+Specifies the given text is a document from
+the corpus being searched.
+ |
+
+
+`SEMANTIC_SIMILARITY`
+ |
+
+`3`
+
+Specifies the given text will be used for
+STS.
+ |
+
+
+`CLASSIFICATION`
+ |
+
+`4`
+
+Specifies that the given text will be
+classified.
+ |
+
+
+`CLUSTERING`
+ |
+
+`5`
+
+Specifies that the embeddings will be used
+for clustering.
+ |
+
+
+`QUESTION_ANSWERING`
+ |
+
+`6`
+
+Specifies that the given text will be used
+for question answering.
+ |
+
+
+`FACT_VERIFICATION`
+ |
+
+`7`
+
+Specifies that the given text will be used
+for fact verification.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+CLASSIFICATION
+ |
+
+``
+ |
+
+
+CLUSTERING
+ |
+
+``
+ |
+
+
+FACT_VERIFICATION
+ |
+
+``
+ |
+
+
+QUESTION_ANSWERING
+ |
+
+``
+ |
+
+
+RETRIEVAL_DOCUMENT
+ |
+
+``
+ |
+
+
+RETRIEVAL_QUERY
+ |
+
+``
+ |
+
+
+SEMANTIC_SIMILARITY
+ |
+
+``
+ |
+
+
+TASK_TYPE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/TextCompletion.md b/docs/api/google/generativeai/protos/TextCompletion.md
new file mode 100644
index 000000000..0331b5fb4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TextCompletion.md
@@ -0,0 +1,74 @@
+description: Output text returned from a model.
+
+
+
+
+
+
+# google.generativeai.protos.TextCompletion
+
+
+
+
+
+
+
+Output text returned from a model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`output`
+ |
+
+`str`
+
+Output only. The generated text returned from
+the model.
+ |
+
+
+`safety_ratings`
+ |
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for the safety of a response.
+
+There is at most one rating per category.
+ |
+
+
+`citation_metadata`
+ |
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+``output`` in this ``TextCompletion``.
+
+This field may be populated with attribution information for
+any text included in the ``output``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TextPrompt.md b/docs/api/google/generativeai/protos/TextPrompt.md
new file mode 100644
index 000000000..5b51b8057
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TextPrompt.md
@@ -0,0 +1,50 @@
+description: Text given to the model as a prompt.
+
+
+
+
+
+
+# google.generativeai.protos.TextPrompt
+
+
+
+
+
+
+
+Text given to the model as a prompt.
+
+
+
+The Model will use this TextPrompt to Generate a text
+completion.
+
+
+
+
+
+
+Attributes |
+
+
+
+`text`
+ |
+
+`str`
+
+Required. The prompt text.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Tool.md b/docs/api/google/generativeai/protos/Tool.md
new file mode 100644
index 000000000..3e4c0bb48
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Tool.md
@@ -0,0 +1,73 @@
+description: Tool details that the model may use to generate response.
+
+
+
+
+
+
+# google.generativeai.protos.Tool
+
+
+
+
+
+
+
+Tool details that the model may use to generate response.
+
+
+
+A ``Tool`` is a piece of code that enables the system to interact
+with external systems to perform an action, or set of actions,
+outside of knowledge and scope of the model.
+
+
+
+
+
+
+Attributes |
+
+
+
+`function_declarations`
+ |
+
+`MutableSequence[google.ai.generativelanguage.FunctionDeclaration]`
+
+Optional. A list of ``FunctionDeclarations`` available to
+the model that can be used for function calling.
+
+The model or system does not execute the function. Instead
+the defined function may be returned as a
+[FunctionCall][content.part.function_call] with arguments to
+the client side for execution. The model may decide to call
+a subset of these functions by populating
+[FunctionCall][content.part.function_call] in the response.
+The next conversation turn may contain a
+[FunctionResponse][content.part.function_response] with the
+[content.role] "function" generation context for the next
+model turn.
+ |
+
+
+`code_execution`
+ |
+
+`google.ai.generativelanguage.CodeExecution`
+
+Optional. Enables the model to execute code
+as part of generation.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ToolConfig.md b/docs/api/google/generativeai/protos/ToolConfig.md
new file mode 100644
index 000000000..3c18c3a18
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ToolConfig.md
@@ -0,0 +1,48 @@
+description: The Tool configuration containing parameters for specifying Tool use in the request.
+
+
+
+
+
+
+# google.generativeai.protos.ToolConfig
+
+
+
+
+
+
+
+The Tool configuration containing parameters for specifying ``Tool`` use in the request.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`function_calling_config`
+ |
+
+`google.ai.generativelanguage.FunctionCallingConfig`
+
+Optional. Function calling config.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipRequest.md b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
new file mode 100644
index 000000000..ebf034974
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
@@ -0,0 +1,61 @@
+description: Request to transfer the ownership of the tuned model.
+
+
+
+
+
+
+# google.generativeai.protos.TransferOwnershipRequest
+
+
+
+
+
+
+
+Request to transfer the ownership of the tuned model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+`str`
+
+Required. The resource name of the tuned model to transfer
+ownership.
+
+Format: ``tunedModels/my-model-id``
+ |
+
+
+`email_address`
+ |
+
+`str`
+
+Required. The email address of the user to
+whom the tuned model is being transferred.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipResponse.md b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
new file mode 100644
index 000000000..77a6706cc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
@@ -0,0 +1,27 @@
+description: Response from TransferOwnership.
+
+
+
+
+
+
+# google.generativeai.protos.TransferOwnershipResponse
+
+
+
+
+
+
+
+Response from ``TransferOwnership``.
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TunedModel.md b/docs/api/google/generativeai/protos/TunedModel.md
new file mode 100644
index 000000000..a9517d69b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TunedModel.md
@@ -0,0 +1,197 @@
+description: A fine-tuned model created using ModelService.CreateTunedModel.
+
+
+
+
+
+
+
+# google.generativeai.protos.TunedModel
+
+
+
+
+
+
+
+A fine-tuned model created using ModelService.CreateTunedModel.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_model_source`
+ |
+
+`google.ai.generativelanguage.TunedModelSource`
+
+Optional. TunedModel to use as the starting
+point for training the new model.
+
+This field is a member of `oneof`_ ``source_model``.
+ |
+
+
+`base_model`
+ |
+
+`str`
+
+Immutable. The name of the ``Model`` to tune. Example:
+``models/text-bison-001``
+
+This field is a member of `oneof`_ ``source_model``.
+ |
+
+
+`name`
+ |
+
+`str`
+
+Output only. The tuned model name. A unique name will be
+generated on create. Example: ``tunedModels/az2mb0bpw6i`` If
+display_name is set on create, the id portion of the name
+will be set by concatenating the words of the display_name
+with hyphens and adding a random portion for uniqueness.
+Example: display_name = "Sentence Translator" name =
+"tunedModels/sentence-translator-u3b7m".
+ |
+
+
+`display_name`
+ |
+
+`str`
+
+Optional. The name to display for this model
+in user interfaces. The display name must be up
+to 40 characters including spaces.
+ |
+
+
+`description`
+ |
+
+`str`
+
+Optional. A short description of this model.
+ |
+
+
+`temperature`
+ |
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range over ``[0.0,1.0]``, inclusive. A value
+closer to ``1.0`` will produce responses that are more
+varied, while a value closer to ``0.0`` will typically
+result in less surprising responses from the model.
+
+This value specifies default to be the one used by the base
+model while creating the model.
+
+ |
+
+
+`top_p`
+ |
+
+`float`
+
+Optional. For Nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``.
+
+This value specifies default to be the one used by the base
+model while creating the model.
+
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+Optional. For Top-k sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. This value specifies default to be used by the
+backend while making the call to the model.
+
+This value specifies default to be the one used by the base
+model while creating the model.
+
+ |
+
+
+`state`
+ |
+
+`google.ai.generativelanguage.TunedModel.State`
+
+Output only. The state of the tuned model.
+ |
+
+
+`create_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this model
+was created.
+ |
+
+
+`update_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this model
+was updated.
+ |
+
+
+`tuning_task`
+ |
+
+`google.ai.generativelanguage.TuningTask`
+
+Required. The tuning task that creates the
+tuned model.
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/types/TunedModelState.md)
+
diff --git a/docs/api/google/generativeai/protos/TunedModelSource.md b/docs/api/google/generativeai/protos/TunedModelSource.md
new file mode 100644
index 000000000..710c06e4b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TunedModelSource.md
@@ -0,0 +1,61 @@
+description: Tuned model as a source for training a new model.
+
+
+
+
+
+
+# google.generativeai.protos.TunedModelSource
+
+
+
+
+
+
+
+Tuned model as a source for training a new model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_model`
+ |
+
+`str`
+
+Immutable. The name of the ``TunedModel`` to use as the
+starting point for training the new model. Example:
+``tunedModels/my-tuned-model``
+ |
+
+
+`base_model`
+ |
+
+`str`
+
+Output only. The name of the base ``Model`` this
+``TunedModel`` was tuned from. Example:
+``models/text-bison-001``
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningExample.md b/docs/api/google/generativeai/protos/TuningExample.md
new file mode 100644
index 000000000..bcbdfc7e3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningExample.md
@@ -0,0 +1,59 @@
+description: A single example for tuning.
+
+
+
+
+
+
+# google.generativeai.protos.TuningExample
+
+
+
+
+
+
+
+A single example for tuning.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`text_input`
+ |
+
+`str`
+
+Optional. Text model input.
+
+This field is a member of `oneof`_ ``model_input``.
+ |
+
+
+`output`
+ |
+
+`str`
+
+Required. The expected model output.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningExamples.md b/docs/api/google/generativeai/protos/TuningExamples.md
new file mode 100644
index 000000000..6a9e8670a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningExamples.md
@@ -0,0 +1,50 @@
+description: A set of tuning examples. Can be training or validation data.
+
+
+
+
+
+
+# google.generativeai.protos.TuningExamples
+
+
+
+
+
+
+
+A set of tuning examples. Can be training or validation data.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`examples`
+ |
+
+`MutableSequence[google.ai.generativelanguage.TuningExample]`
+
+Required. The examples. Example input can be
+for text or discuss, but all examples in a set
+must be of the same type.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningSnapshot.md b/docs/api/google/generativeai/protos/TuningSnapshot.md
new file mode 100644
index 000000000..1e088b8f4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningSnapshot.md
@@ -0,0 +1,77 @@
+description: Record for a single tuning step.
+
+
+
+
+
+
+# google.generativeai.protos.TuningSnapshot
+
+
+
+
+
+
+
+Record for a single tuning step.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`step`
+ |
+
+`int`
+
+Output only. The tuning step.
+ |
+
+
+`epoch`
+ |
+
+`int`
+
+Output only. The epoch this step was part of.
+ |
+
+
+`mean_loss`
+ |
+
+`float`
+
+Output only. The mean loss of the training
+examples for this step.
+ |
+
+
+`compute_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this metric
+was computed.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningTask.md b/docs/api/google/generativeai/protos/TuningTask.md
new file mode 100644
index 000000000..8d4a15d9e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningTask.md
@@ -0,0 +1,89 @@
+description: Tuning tasks that create tuned models.
+
+
+
+
+
+
+# google.generativeai.protos.TuningTask
+
+
+
+
+
+
+
+Tuning tasks that create tuned models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`start_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when tuning this
+model started.
+ |
+
+
+`complete_time`
+ |
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when tuning this
+model completed.
+ |
+
+
+`snapshots`
+ |
+
+`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
+
+Output only. Metrics collected during tuning.
+ |
+
+
+`training_data`
+ |
+
+`google.ai.generativelanguage.Dataset`
+
+Required. Input only. Immutable. The model
+training data.
+ |
+
+
+`hyperparameters`
+ |
+
+`google.ai.generativelanguage.Hyperparameters`
+
+Immutable. Hyperparameters controlling the
+tuning process. If not provided, default values
+will be used.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Type.md b/docs/api/google/generativeai/protos/Type.md
new file mode 100644
index 000000000..8184c30fc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Type.md
@@ -0,0 +1,746 @@
+description: Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.protos.Type
+
+
+
+
+
+
+
+Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
+
+
+google.generativeai.protos.Type(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`TYPE_UNSPECIFIED`
+ |
+
+`0`
+
+Not specified, should not be used.
+ |
+
+
+`STRING`
+ |
+
+`1`
+
+String type.
+ |
+
+
+`NUMBER`
+ |
+
+`2`
+
+Number type.
+ |
+
+
+`INTEGER`
+ |
+
+`3`
+
+Integer type.
+ |
+
+
+`BOOLEAN`
+ |
+
+`4`
+
+Boolean type.
+ |
+
+
+`ARRAY`
+ |
+
+`5`
+
+Array type.
+ |
+
+
+`OBJECT`
+ |
+
+`6`
+
+Object type.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+ARRAY
+ |
+
+``
+ |
+
+
+BOOLEAN
+ |
+
+``
+ |
+
+
+INTEGER
+ |
+
+``
+ |
+
+
+NUMBER
+ |
+
+``
+ |
+
+
+OBJECT
+ |
+
+``
+ |
+
+
+STRING
+ |
+
+``
+ |
+
+
+TYPE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
new file mode 100644
index 000000000..68899d656
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
@@ -0,0 +1,57 @@
+description: Request to update CachedContent.
+
+
+
+
+
+
+# google.generativeai.protos.UpdateCachedContentRequest
+
+
+
+
+
+
+
+Request to update CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`cached_content`
+ |
+
+`google.ai.generativelanguage.CachedContent`
+
+Required. The content cache entry to update
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+The list of fields to update.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateChunkRequest.md b/docs/api/google/generativeai/protos/UpdateChunkRequest.md
new file mode 100644
index 000000000..df0ad7619
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateChunkRequest.md
@@ -0,0 +1,58 @@
+description: Request to update a Chunk.
+
+
+
+
+
+
+# google.generativeai.protos.UpdateChunkRequest
+
+
+
+
+
+
+
+Request to update a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`chunk`
+ |
+
+`google.ai.generativelanguage.Chunk`
+
+Required. The ``Chunk`` to update.
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``custom_metadata`` and ``data``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateCorpusRequest.md b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
new file mode 100644
index 000000000..c38d4fd20
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
@@ -0,0 +1,58 @@
+description: Request to update a Corpus.
+
+
+
+
+
+
+# google.generativeai.protos.UpdateCorpusRequest
+
+
+
+
+
+
+
+Request to update a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`corpus`
+ |
+
+`google.ai.generativelanguage.Corpus`
+
+Required. The ``Corpus`` to update.
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``display_name``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateDocumentRequest.md b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
new file mode 100644
index 000000000..d0b903835
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
@@ -0,0 +1,58 @@
+description: Request to update a Document.
+
+
+
+
+
+
+# google.generativeai.protos.UpdateDocumentRequest
+
+
+
+
+
+
+
+Request to update a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`document`
+ |
+
+`google.ai.generativelanguage.Document`
+
+Required. The ``Document`` to update.
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``display_name`` and ``custom_metadata``.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdatePermissionRequest.md b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
new file mode 100644
index 000000000..1a04f87ed
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
@@ -0,0 +1,62 @@
+description: Request to update the Permission.
+
+
+
+
+
+
+# google.generativeai.protos.UpdatePermissionRequest
+
+
+
+
+
+
+
+Request to update the ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`permission`
+ |
+
+`google.ai.generativelanguage.Permission`
+
+Required. The permission to update.
+
+The permission's ``name`` field is used to identify the
+permission to update.
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Accepted ones:
+
+- role (Permission.role field)
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
new file mode 100644
index 000000000..76ab1573c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
@@ -0,0 +1,57 @@
+description: Request to update a TunedModel.
+
+
+
+
+
+
+# google.generativeai.protos.UpdateTunedModelRequest
+
+
+
+
+
+
+
+Request to update a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`tuned_model`
+ |
+
+`google.ai.generativelanguage.TunedModel`
+
+Required. The tuned model to update.
+ |
+
+
+`update_mask`
+ |
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/VideoMetadata.md b/docs/api/google/generativeai/protos/VideoMetadata.md
new file mode 100644
index 000000000..5f8d7c590
--- /dev/null
+++ b/docs/api/google/generativeai/protos/VideoMetadata.md
@@ -0,0 +1,48 @@
+description: Metadata for a video File.
+
+
+
+
+
+
+# google.generativeai.protos.VideoMetadata
+
+
+
+
+
+
+
+Metadata for a video ``File``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`video_duration`
+ |
+
+`google.protobuf.duration_pb2.Duration`
+
+Duration of the video.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types.md b/docs/api/google/generativeai/types.md
new file mode 100644
index 000000000..9dacfa610
--- /dev/null
+++ b/docs/api/google/generativeai/types.md
@@ -0,0 +1,182 @@
+description: A collection of type definitions used throughout the library.
+
+
+
+
+
+
+
+# Module: google.generativeai.types
+
+
+
+
+
+
+
+A collection of type definitions used throughout the library.
+
+
+
+## Classes
+
+[`class AsyncGenerateContentResponse`](../../google/generativeai/types/AsyncGenerateContentResponse.md): This is the async version of `genai.GenerateContentResponse`.
+
+[`class AuthorError`](../../google/generativeai/types/AuthorError.md): Raised by the `chat` (or `reply`) functions when the author list can't be normalized.
+
+[`class BlobDict`](../../google/generativeai/types/BlobDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class BlockedPromptException`](../../google/generativeai/types/BlockedPromptException.md): Common base class for all non-exit exceptions.
+
+[`class BlockedReason`](../../google/generativeai/types/BlockedReason.md): A list of reasons why content may have been blocked.
+
+[`class BrokenResponseError`](../../google/generativeai/types/BrokenResponseError.md): Common base class for all non-exit exceptions.
+
+[`class CallableFunctionDeclaration`](../../google/generativeai/types/CallableFunctionDeclaration.md): An extension of `FunctionDeclaration` that can be built from a python function, and is callable.
+
+[`class ChatResponse`](../../google/generativeai/types/ChatResponse.md): A chat response from the model.
+
+[`class CitationMetadataDict`](../../google/generativeai/types/CitationMetadataDict.md): A collection of source attributions for a piece of content.
+
+[`class CitationSourceDict`](../../google/generativeai/types/CitationSourceDict.md): A citation to a source for a portion of a specific response.
+
+[`class Completion`](../../google/generativeai/types/Completion.md): The result returned by generativeai.generate_text.
+
+[`class ContentDict`](../../google/generativeai/types/ContentDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class ContentFilterDict`](../../google/generativeai/types/ContentFilterDict.md): Content filtering metadata associated with processing a single request.
+
+[`class ExampleDict`](../../google/generativeai/types/ExampleDict.md): A dict representation of a protos.Example.
+
+[`class File`](../../google/generativeai/types/File.md)
+
+[`class FileDataDict`](../../google/generativeai/types/FileDataDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class FunctionDeclaration`](../../google/generativeai/types/FunctionDeclaration.md)
+
+[`class FunctionLibrary`](../../google/generativeai/types/FunctionLibrary.md): A container for a set of `Tool` objects, manages lookup and execution of their functions.
+
+[`class GenerateContentResponse`](../../google/generativeai/types/GenerateContentResponse.md): Instances of this class manage the response of the `generate_content` method.
+
+[`class GenerationConfig`](../../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content.
+
+[`class GenerationConfigDict`](../../google/generativeai/types/GenerationConfigDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class HarmBlockThreshold`](../../google/generativeai/types/HarmBlockThreshold.md): Block at and beyond a specified harm probability.
+
+[`class HarmCategory`](../../google/generativeai/types/HarmCategory.md): Harm Categories supported by the gemini-family model
+
+[`class HarmProbability`](../../google/generativeai/types/HarmProbability.md): The probability that a piece of content is harmful.
+
+[`class IncompleteIterationError`](../../google/generativeai/types/IncompleteIterationError.md): Common base class for all non-exit exceptions.
+
+[`class MessageDict`](../../google/generativeai/types/MessageDict.md): A dict representation of a protos.Message.
+
+[`class MessagePromptDict`](../../google/generativeai/types/MessagePromptDict.md): A dict representation of a protos.MessagePrompt.
+
+[`class Model`](../../google/generativeai/types/Model.md): A dataclass representation of a protos.Model.
+
+[`class PartDict`](../../google/generativeai/types/PartDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class Permission`](../../google/generativeai/types/Permission.md): A permission to access a resource.
+
+[`class Permissions`](../../google/generativeai/types/Permissions.md)
+
+[`class RequestOptions`](../../google/generativeai/types/RequestOptions.md): Request options
+
+[`class ResponseDict`](../../google/generativeai/types/ResponseDict.md): A dict representation of a protos.GenerateMessageResponse.
+
+[`class SafetyFeedbackDict`](../../google/generativeai/types/SafetyFeedbackDict.md): Safety feedback for an entire request.
+
+[`class SafetyRatingDict`](../../google/generativeai/types/SafetyRatingDict.md): Safety rating for a piece of content.
+
+[`class SafetySettingDict`](../../google/generativeai/types/SafetySettingDict.md): Safety setting, affecting the safety-blocking behavior.
+
+[`class Status`](../../google/generativeai/types/Status.md): A ProtocolMessage
+
+[`class StopCandidateException`](../../google/generativeai/types/StopCandidateException.md): Common base class for all non-exit exceptions.
+
+[`class Tool`](../../google/generativeai/types/Tool.md): A wrapper for protos.Tool, Contains a collection of related `FunctionDeclaration` objects.
+
+[`class ToolDict`](../../google/generativeai/types/ToolDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+[`class TunedModel`](../../google/generativeai/types/TunedModel.md): A dataclass representation of a protos.TunedModel.
+
+[`class TunedModelState`](../../google/generativeai/types/TunedModelState.md): The state of the tuned model.
+
+## Functions
+
+[`TypedDict(...)`](../../google/generativeai/types/TypedDict.md): A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+[`get_default_file_client(...)`](../../google/generativeai/types/get_default_file_client.md)
+
+[`to_file_data(...)`](../../google/generativeai/types/to_file_data.md)
+
+## Type Aliases
+
+[`AnyModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
+
+[`BaseModelNameOptions`](../../google/generativeai/types/BaseModelNameOptions.md)
+
+[`BlobType`](../../google/generativeai/types/BlobType.md)
+
+[`ContentType`](../../google/generativeai/types/ContentType.md)
+
+[`ContentsType`](../../google/generativeai/types/ContentsType.md)
+
+[`ExampleOptions`](../../google/generativeai/types/ExampleOptions.md)
+
+[`ExamplesOptions`](../../google/generativeai/types/ExamplesOptions.md)
+
+[`FileDataType`](../../google/generativeai/types/FileDataType.md)
+
+[`FunctionDeclarationType`](../../google/generativeai/types/FunctionDeclarationType.md)
+
+[`FunctionLibraryType`](../../google/generativeai/types/FunctionLibraryType.md)
+
+[`GenerationConfigType`](../../google/generativeai/types/GenerationConfigType.md)
+
+[`MessageOptions`](../../google/generativeai/types/MessageOptions.md)
+
+[`MessagePromptOptions`](../../google/generativeai/types/MessagePromptOptions.md)
+
+[`MessagesOptions`](../../google/generativeai/types/MessagesOptions.md)
+
+[`ModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
+
+[`ModelsIterable`](../../google/generativeai/types/ModelsIterable.md)
+
+[`PartType`](../../google/generativeai/types/PartType.md)
+
+[`RequestOptionsType`](../../google/generativeai/types/RequestOptionsType.md)
+
+[`StrictContentType`](../../google/generativeai/types/StrictContentType.md)
+
+[`ToolsType`](../../google/generativeai/types/ToolsType.md)
+
+[`TunedModelNameOptions`](../../google/generativeai/types/TunedModelNameOptions.md)
+
+
+
+
+
+
+Other Members |
+
+
+
+annotations
+ |
+
+Instance of `__future__._Feature`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/AnyModelNameOptions.md b/docs/api/google/generativeai/types/AnyModelNameOptions.md
new file mode 100644
index 000000000..c2090da48
--- /dev/null
+++ b/docs/api/google/generativeai/types/AnyModelNameOptions.md
@@ -0,0 +1,27 @@
+
+
+
+
+
+# google.generativeai.types.AnyModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+AnyModelNameOptions = Union[
+ str,
+ google.generativeai.types.Model
,
+ google.generativeai.protos.Model
,
+ google.generativeai.types.TunedModel
,
+ google.generativeai.protos.TunedModel
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
new file mode 100644
index 000000000..0e302ea41
--- /dev/null
+++ b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
@@ -0,0 +1,152 @@
+description: This is the async version of genai.GenerateContentResponse.
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.AsyncGenerateContentResponse
+
+
+
+
+
+
+
+This is the async version of `genai.GenerateContentResponse`.
+
+
+google.generativeai.types.AsyncGenerateContentResponse(
+ done: bool,
+ iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.
+ GenerateContentResponse]),
+ result: protos.GenerateContentResponse,
+ chunks: (Iterable[protos.GenerateContentResponse] | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+The list of candidate responses.
+ |
+
+
+`parts`
+ |
+
+A quick accessor equivalent to `self.candidates[0].content.parts`
+ |
+
+
+`prompt_feedback`
+ |
+
+
+ |
+
+
+`text`
+ |
+
+A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
+ |
+
+
+`usage_metadata`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_aiterator
+
+View source
+
+
+from_aiterator(
+ iterator
+)
+
+
+
+
+
+from_response
+
+View source
+
+
+@classmethod
+from_response(
+ response: protos.GenerateContentResponse
+)
+
+
+
+
+
+resolve
+
+View source
+
+
+resolve()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict()
+
+
+Returns the result as a JSON-compatible dict.
+
+Note: This doesn't capture the iterator state when streaming, it only captures the accumulated
+`GenerateContentResponse` fields.
+
+```
+>>> import json
+>>> response = model.generate_content('Hello?')
+>>> json.dumps(response.to_dict())
+```
+
+
+
diff --git a/docs/api/google/generativeai/types/AuthorError.md b/docs/api/google/generativeai/types/AuthorError.md
new file mode 100644
index 000000000..0220cd6f5
--- /dev/null
+++ b/docs/api/google/generativeai/types/AuthorError.md
@@ -0,0 +1,27 @@
+description: Raised by the chat (or reply) functions when the author list can't be normalized.
+
+
+
+
+
+
+# google.generativeai.types.AuthorError
+
+
+
+
+
+
+
+Raised by the `chat` (or `reply`) functions when the author list can't be normalized.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BaseModelNameOptions.md b/docs/api/google/generativeai/types/BaseModelNameOptions.md
new file mode 100644
index 000000000..25a142c3e
--- /dev/null
+++ b/docs/api/google/generativeai/types/BaseModelNameOptions.md
@@ -0,0 +1,25 @@
+
+
+
+
+
+# google.generativeai.types.BaseModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+BaseModelNameOptions = Union[
+ str,
+ google.generativeai.types.Model
,
+ google.generativeai.protos.Model
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlobDict.md b/docs/api/google/generativeai/types/BlobDict.md
new file mode 100644
index 000000000..b13257433
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlobDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.BlobDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/BlobType.md b/docs/api/google/generativeai/types/BlobType.md
new file mode 100644
index 000000000..82039d71c
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlobType.md
@@ -0,0 +1,26 @@
+
+
+
+
+
+# google.generativeai.types.BlobType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+BlobType = Union[
+ google.generativeai.protos.Blob
,
+ google.generativeai.types.BlobDict
,
+ PIL.Image.Image,
+ IPython.core.display.Image
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlockedPromptException.md b/docs/api/google/generativeai/types/BlockedPromptException.md
new file mode 100644
index 000000000..93dc09556
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlockedPromptException.md
@@ -0,0 +1,27 @@
+description: Common base class for all non-exit exceptions.
+
+
+
+
+
+
+# google.generativeai.types.BlockedPromptException
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlockedReason.md b/docs/api/google/generativeai/types/BlockedReason.md
new file mode 100644
index 000000000..4508388eb
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlockedReason.md
@@ -0,0 +1,687 @@
+description: A list of reasons why content may have been blocked.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.BlockedReason
+
+
+
+
+
+
+
+A list of reasons why content may have been blocked.
+
+
+ View aliases
+
+Main aliases
+
`google.generativeai.protos.ContentFilter.BlockedReason`
+
+
+
+
+google.generativeai.types.BlockedReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`BLOCKED_REASON_UNSPECIFIED`
+ |
+
+`0`
+
+A blocked reason was not specified.
+ |
+
+
+`SAFETY`
+ |
+
+`1`
+
+Content was blocked by safety settings.
+ |
+
+
+`OTHER`
+ |
+
+`2`
+
+Content was blocked, but the reason is
+uncategorized.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+BLOCKED_REASON_UNSPECIFIED
+ |
+
+``
+ |
+
+
+OTHER
+ |
+
+``
+ |
+
+
+SAFETY
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/BrokenResponseError.md b/docs/api/google/generativeai/types/BrokenResponseError.md
new file mode 100644
index 000000000..073ade6c0
--- /dev/null
+++ b/docs/api/google/generativeai/types/BrokenResponseError.md
@@ -0,0 +1,27 @@
+description: Common base class for all non-exit exceptions.
+
+
+
+
+
+
+# google.generativeai.types.BrokenResponseError
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/CallableFunctionDeclaration.md b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
new file mode 100644
index 000000000..5a8e8eebc
--- /dev/null
+++ b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
@@ -0,0 +1,144 @@
+description: An extension of FunctionDeclaration that can be built from a python function, and is callable.
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.CallableFunctionDeclaration
+
+
+
+
+
+
+
+An extension of `FunctionDeclaration` that can be built from a python function, and is callable.
+
+Inherits From: [`FunctionDeclaration`](../../../google/generativeai/types/FunctionDeclaration.md)
+
+
+google.generativeai.types.CallableFunctionDeclaration(
+ *,
+ name: str,
+ description: str,
+ parameters: (dict[str, Any] | None) = None,
+ function: Callable[..., Any]
+)
+
+
+
+
+
+
+Note: The python function must have type annotations.
+
+
+
+
+
+
+Attributes |
+
+
+
+`description`
+ |
+
+
+ |
+
+
+`name`
+ |
+
+
+ |
+
+
+`parameters`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_function
+
+View source
+
+
+@staticmethod
+from_function(
+ function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
+)
+
+
+Builds a `CallableFunctionDeclaration` from a python function.
+
+The function should have type annotations.
+
+This method is able to generate the schema for arguments annotated with types:
+
+`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
+
+This method does not yet build a schema for `TypedDict`, that would allow you to specify the dictionary
+contents. But you can build these manually.
+
+from_proto
+
+View source
+
+
+@classmethod
+from_proto(
+ proto
+) -> FunctionDeclaration
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.FunctionDeclaration
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> protos.FunctionResponse
+
+
+Call self as a function.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ChatResponse.md b/docs/api/google/generativeai/types/ChatResponse.md
new file mode 100644
index 000000000..5b8453b7c
--- /dev/null
+++ b/docs/api/google/generativeai/types/ChatResponse.md
@@ -0,0 +1,223 @@
+description: A chat response from the model.
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.ChatResponse
+
+
+
+
+
+
+
+A chat response from the model.
+
+
+
+* Use `response.last` (settable) for easy access to the text of the last response.
+ (`messages[-1]['content']`)
+* Use `response.messages` to access the message history (including `.last`).
+* Use `response.candidates` to access all the responses generated by the model.
+
+Other attributes are just saved from the arguments to `genai.chat`, so you
+can easily continue a conversation:
+
+```
+import google.generativeai as genai
+
+genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
+
+response = genai.chat(messages=["Hello."])
+print(response.last) # 'Hello! What can I help you with?'
+response.reply("Can you tell me a joke?")
+```
+
+See `genai.chat` for more details.
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+A list of candidate responses from the model.
+
+The top candidate is appended to the `messages` field.
+
+This list will contain a *maximum* of `candidate_count` candidates.
+It may contain fewer (duplicates are dropped), it will contain at least one.
+
+Note: The `temperature` field affects the variability of the responses. Low
+temperatures will return few candidates. Setting `temperature=0` is deterministic,
+so it will only ever return one candidate.
+ |
+
+
+`filters`
+ |
+
+This indicates which `types.SafetyCategory`(s) blocked a
+candidate from this response, the lowest types.HarmProbability
+that triggered a block, and the `types.HarmThreshold` setting for that category.
+This indicates the smallest change to the `types.SafetySettings` that would be
+necessary to unblock at least 1 response.
+
+The blocking is configured by the `types.SafetySettings` in the request (or the
+default `types.SafetySettings` of the API).
+ |
+
+
+`messages`
+ |
+
+A snapshot of the conversation history sorted chronologically.
+ |
+
+
+`model`
+ |
+
+The model name.
+ |
+
+
+`context`
+ |
+
+Text that should be provided to the model first, to ground the response.
+ |
+
+
+`examples`
+ |
+
+Examples of what the model should generate.
+ |
+
+
+`temperature`
+ |
+
+Controls the randomness of the output. Must be positive.
+ |
+
+
+`candidate_count`
+ |
+
+The **maximum** number of generated response messages to return.
+ |
+
+
+`top_k`
+ |
+
+The maximum number of tokens to consider when sampling.
+ |
+
+
+`top_p`
+ |
+
+The maximum cumulative probability of tokens to consider when sampling.
+ |
+
+
+`last`
+ |
+
+A settable property that provides simple access to the last response string
+
+
+A shortcut for `response.messages[0]['content']`.
+ |
+
+
+
+
+
+## Methods
+
+reply
+
+View source
+
+
+@abc.abstractmethod
+reply(
+ message: google.generativeai.types.MessageOptions
+) -> 'ChatResponse'
+
+
+Add a message to the conversation, and get the model's response.
+
+
+to_dict
+
+View source
+
+
+to_dict() -> Dict[str, Any]
+
+
+
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+top_k
+ |
+
+`None`
+ |
+
+
+top_p
+ |
+
+`None`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/CitationMetadataDict.md b/docs/api/google/generativeai/types/CitationMetadataDict.md
new file mode 100644
index 000000000..fc61779d2
--- /dev/null
+++ b/docs/api/google/generativeai/types/CitationMetadataDict.md
@@ -0,0 +1,48 @@
+description: A collection of source attributions for a piece of content.
+
+
+
+
+
+
+# google.generativeai.types.CitationMetadataDict
+
+
+
+
+
+
+
+A collection of source attributions for a piece of content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`citation_sources`
+ |
+
+`MutableSequence[google.ai.generativelanguage.CitationSource]`
+
+Citations to sources for a specific response.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/CitationSourceDict.md b/docs/api/google/generativeai/types/CitationSourceDict.md
new file mode 100644
index 000000000..2df1ee443
--- /dev/null
+++ b/docs/api/google/generativeai/types/CitationSourceDict.md
@@ -0,0 +1,84 @@
+description: A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+# google.generativeai.types.CitationSourceDict
+
+
+
+
+
+
+
+A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`start_index`
+ |
+
+`int`
+
+Optional. Start of segment of the response
+that is attributed to this source.
+
+Index indicates the start of the segment,
+measured in bytes.
+ |
+
+
+`end_index`
+ |
+
+`int`
+
+Optional. End of the attributed segment,
+exclusive.
+ |
+
+
+`uri`
+ |
+
+`str`
+
+Optional. URI that is attributed as a source
+for a portion of the text.
+ |
+
+
+`license_`
+ |
+
+`str`
+
+Optional. License for the GitHub project that
+is attributed as a source for segment.
+
+License info is required for code citations.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Completion.md b/docs/api/google/generativeai/types/Completion.md
new file mode 100644
index 000000000..9b7ac8daf
--- /dev/null
+++ b/docs/api/google/generativeai/types/Completion.md
@@ -0,0 +1,97 @@
+description: The result returned by generativeai.generate_text
.
+
+
+
+
+
+
+
+
+# google.generativeai.types.Completion
+
+
+
+
+
+
+
+The result returned by generativeai.generate_text
.
+
+
+
+Use GenerateTextResponse.candidates
to access all the completions generated by the model.
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+A list of candidate text completions generated by the model.
+ |
+
+
+`result`
+ |
+
+The output of the first candidate,
+ |
+
+
+`filters`
+ |
+
+Indicates the reasons why content may have been blocked.
+See types.BlockedReason .
+ |
+
+
+`safety_feedback`
+ |
+
+Indicates which safety settings blocked content in this result.
+ |
+
+
+
+
+
+## Methods
+
+to_dict
+
+View source
+
+
+to_dict() -> Dict[str, Any]
+
+
+
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentDict.md b/docs/api/google/generativeai/types/ContentDict.md
new file mode 100644
index 000000000..3334b93a0
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.ContentDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/ContentFilterDict.md b/docs/api/google/generativeai/types/ContentFilterDict.md
new file mode 100644
index 000000000..a15074d73
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentFilterDict.md
@@ -0,0 +1,62 @@
+description: Content filtering metadata associated with processing a single request.
+
+
+
+
+
+
+# google.generativeai.types.ContentFilterDict
+
+
+
+
+
+
+
+Content filtering metadata associated with processing a single request.
+
+
+ContentFilter contains a reason and an optional supporting
+string. The reason may be unspecified.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`reason`
+ |
+
+`google.ai.generativelanguage.ContentFilter.BlockedReason`
+
+The reason content was blocked during request
+processing.
+ |
+
+
+`message`
+ |
+
+`str`
+
+A string that describes the filtering
+behavior in more detail.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentType.md b/docs/api/google/generativeai/types/ContentType.md
new file mode 100644
index 000000000..775f39380
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentType.md
@@ -0,0 +1,38 @@
+
+
+
+
+
+# google.generativeai.types.ContentType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ContentType = Union[
+ google.generativeai.protos.Content
,
+ google.generativeai.types.ContentDict
,
+ Iterable[google.generativeai.types.PartType
],
+ google.generativeai.protos.Part
,
+ google.generativeai.types.PartDict
,
+ google.generativeai.protos.Blob
,
+ google.generativeai.types.BlobDict
,
+ PIL.Image.Image,
+ IPython.core.display.Image,
+ str,
+ google.generativeai.protos.FunctionCall
,
+ google.generativeai.protos.FunctionResponse
,
+ google.generativeai.types.FileDataDict
,
+ google.generativeai.protos.FileData
,
+ google.generativeai.protos.File
,
+ google.generativeai.types.File
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentsType.md b/docs/api/google/generativeai/types/ContentsType.md
new file mode 100644
index 000000000..79a3feef8
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentsType.md
@@ -0,0 +1,40 @@
+
+
+
+
+
+# google.generativeai.types.ContentsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ContentsType = Union[
+ google.generativeai.protos.Content
,
+ google.generativeai.types.ContentDict
,
+ Iterable[google.generativeai.types.PartType
],
+ google.generativeai.protos.Part
,
+ google.generativeai.types.PartDict
,
+ google.generativeai.protos.Blob
,
+ google.generativeai.types.BlobDict
,
+ PIL.Image.Image,
+ IPython.core.display.Image,
+ str,
+ google.generativeai.protos.FunctionCall
,
+ google.generativeai.protos.FunctionResponse
,
+ google.generativeai.types.FileDataDict
,
+ google.generativeai.protos.FileData
,
+ google.generativeai.protos.File
,
+ google.generativeai.types.File
,
+ Iterable[google.generativeai.types.StrictContentType
],
+ NoneType
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ExampleDict.md b/docs/api/google/generativeai/types/ExampleDict.md
new file mode 100644
index 000000000..a2fae446a
--- /dev/null
+++ b/docs/api/google/generativeai/types/ExampleDict.md
@@ -0,0 +1,27 @@
+description: A dict representation of a protos.Example
.
+
+
+
+
+
+
+# google.generativeai.types.ExampleDict
+
+
+
+
+
+
+
+A dict representation of a protos.Example
.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ExampleOptions.md b/docs/api/google/generativeai/types/ExampleOptions.md
new file mode 100644
index 000000000..6aa3bc965
--- /dev/null
+++ b/docs/api/google/generativeai/types/ExampleOptions.md
@@ -0,0 +1,26 @@
+
+
+
+
+
+# google.generativeai.types.ExampleOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ExampleOptions = Union[
+ tuple[google.generativeai.types.MessageOptions
, google.generativeai.types.MessageOptions
],
+ Iterable[google.generativeai.types.MessageOptions
],
+ google.generativeai.types.ExampleDict
,
+ google.generativeai.protos.Example
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ExamplesOptions.md b/docs/api/google/generativeai/types/ExamplesOptions.md
new file mode 100644
index 000000000..71389d120
--- /dev/null
+++ b/docs/api/google/generativeai/types/ExamplesOptions.md
@@ -0,0 +1,27 @@
+
+
+
+
+
+# google.generativeai.types.ExamplesOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ExamplesOptions = Union[
+ tuple[google.generativeai.types.MessageOptions
, google.generativeai.types.MessageOptions
],
+ Iterable[google.generativeai.types.MessageOptions
],
+ google.generativeai.types.ExampleDict
,
+ google.generativeai.protos.Example
,
+ Iterable[google.generativeai.types.ExampleOptions
]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/File.md b/docs/api/google/generativeai/types/File.md
new file mode 100644
index 000000000..52f5d6996
--- /dev/null
+++ b/docs/api/google/generativeai/types/File.md
@@ -0,0 +1,170 @@
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.File
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.File(
+ proto: (protos.File | File | dict)
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`create_time`
+ |
+
+
+ |
+
+
+`display_name`
+ |
+
+
+ |
+
+
+`error`
+ |
+
+
+ |
+
+
+`expiration_time`
+ |
+
+
+ |
+
+
+`mime_type`
+ |
+
+
+ |
+
+
+`name`
+ |
+
+
+ |
+
+
+`sha256_hash`
+ |
+
+
+ |
+
+
+`size_bytes`
+ |
+
+
+ |
+
+
+`state`
+ |
+
+
+ |
+
+
+`update_time`
+ |
+
+
+ |
+
+
+`uri`
+ |
+
+
+ |
+
+
+`video_metadata`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+delete
+
+View source
+
+
+delete()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict() -> dict[str, Any]
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.File
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FileDataDict.md b/docs/api/google/generativeai/types/FileDataDict.md
new file mode 100644
index 000000000..455cafc5d
--- /dev/null
+++ b/docs/api/google/generativeai/types/FileDataDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.FileDataDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/FileDataType.md b/docs/api/google/generativeai/types/FileDataType.md
new file mode 100644
index 000000000..20d2cefd0
--- /dev/null
+++ b/docs/api/google/generativeai/types/FileDataType.md
@@ -0,0 +1,26 @@
+
+
+
+
+
+# google.generativeai.types.FileDataType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FileDataType = Union[
+ google.generativeai.types.FileDataDict
,
+ google.generativeai.protos.FileData
,
+ google.generativeai.protos.File
,
+ google.generativeai.types.File
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionDeclaration.md b/docs/api/google/generativeai/types/FunctionDeclaration.md
new file mode 100644
index 000000000..672fd711d
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionDeclaration.md
@@ -0,0 +1,121 @@
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.FunctionDeclaration
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.FunctionDeclaration(
+ *, name: str, description: str, parameters: (dict[str, Any] | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`description`
+ |
+
+
+ |
+
+
+`name`
+ |
+
+
+ |
+
+
+`parameters`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_function
+
+View source
+
+
+@staticmethod
+from_function(
+ function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
+)
+
+
+Builds a `CallableFunctionDeclaration` from a python function.
+
+The function should have type annotations.
+
+This method is able to generate the schema for arguments annotated with types:
+
+`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
+
+This method does not yet build a schema for `TypedDict`, that would allow you to specify the dictionary
+contents. But you can build these manually.
+
+from_proto
+
+View source
+
+
+@classmethod
+from_proto(
+ proto
+) -> FunctionDeclaration
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.FunctionDeclaration
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionDeclarationType.md b/docs/api/google/generativeai/types/FunctionDeclarationType.md
new file mode 100644
index 000000000..4d9e8a85a
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionDeclarationType.md
@@ -0,0 +1,26 @@
+
+
+
+
+
+# google.generativeai.types.FunctionDeclarationType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FunctionDeclarationType = Union[
+ google.generativeai.types.FunctionDeclaration
,
+ google.generativeai.protos.FunctionDeclaration
,
+ dict[str, Any],
+ Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionLibrary.md b/docs/api/google/generativeai/types/FunctionLibrary.md
new file mode 100644
index 000000000..e8d386181
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionLibrary.md
@@ -0,0 +1,80 @@
+description: A container for a set of Tool objects, manages lookup and execution of their functions.
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.FunctionLibrary
+
+
+
+
+
+
+
+A container for a set of `Tool` objects, manages lookup and execution of their functions.
+
+
+google.generativeai.types.FunctionLibrary(
+ tools: Iterable[ToolType]
+)
+
+
+
+
+
+
+
+## Methods
+
+to_proto
+
+View source
+
+
+to_proto()
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> (protos.Part | None)
+
+
+Call self as a function.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ name: (str | protos.FunctionCall)
+) -> (FunctionDeclaration | protos.FunctionDeclaration)
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionLibraryType.md b/docs/api/google/generativeai/types/FunctionLibraryType.md
new file mode 100644
index 000000000..2b49e5ad1
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionLibraryType.md
@@ -0,0 +1,32 @@
+
+
+
+
+
+# google.generativeai.types.FunctionLibraryType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FunctionLibraryType = Union[
+ google.generativeai.types.FunctionLibrary
,
+ Iterable[Union[google.generativeai.types.Tool
, google.generativeai.protos.Tool
, google.generativeai.types.ToolDict
, Iterable[google.generativeai.types.FunctionDeclarationType
], google.generativeai.types.FunctionDeclaration
, google.generativeai.protos.FunctionDeclaration
, dict[str, Any], Callable[..., Any]]],
+ google.generativeai.types.Tool
,
+ google.generativeai.protos.Tool
,
+ google.generativeai.types.ToolDict
,
+ Iterable[google.generativeai.types.FunctionDeclarationType
],
+ google.generativeai.types.FunctionDeclaration
,
+ google.generativeai.protos.FunctionDeclaration
,
+ dict[str, Any],
+ Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerateContentResponse.md b/docs/api/google/generativeai/types/GenerateContentResponse.md
new file mode 100644
index 000000000..5f1c6c118
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerateContentResponse.md
@@ -0,0 +1,185 @@
+description: Instances of this class manage the response of the generate_content method.
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.GenerateContentResponse
+
+
+
+
+
+
+
+Instances of this class manage the response of the `generate_content` method.
+
+
+google.generativeai.types.GenerateContentResponse(
+ done: bool,
+ iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.
+ GenerateContentResponse]),
+ result: protos.GenerateContentResponse,
+ chunks: (Iterable[protos.GenerateContentResponse] | None) = None
+)
+
+
+
+
+
+
+These are returned by GenerativeModel.generate_content
and ChatSession.send_message
.
+This object is based on the low level protos.GenerateContentResponse
class which just has `prompt_feedback`
+and `candidates` attributes. This class adds several quick accessors for common use cases.
+
+The same object type is returned for both `stream=True/False`.
+
+### Streaming
+
+When you pass `stream=True` to GenerativeModel.generate_content
or ChatSession.send_message
,
+iterate over this object to receive chunks of the response:
+
+```
+response = model.generate_content(..., stream=True):
+for chunk in response:
+ print(chunk.text)
+```
+
+GenerateContentResponse.prompt_feedback
is available immediately but
+GenerateContentResponse.candidates
, and all the attributes derived from them (`.text`, `.parts`),
+are only available after the iteration is complete.
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidates`
+ |
+
+The list of candidate responses.
+ |
+
+
+`parts`
+ |
+
+A quick accessor equivalent to `self.candidates[0].content.parts`
+ |
+
+
+`prompt_feedback`
+ |
+
+
+ |
+
+
+`text`
+ |
+
+A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
+ |
+
+
+`usage_metadata`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_iterator
+
+View source
+
+
+@classmethod
+from_iterator(
+ iterator: Iterable[protos.GenerateContentResponse]
+)
+
+
+
+
+
+from_response
+
+View source
+
+
+@classmethod
+from_response(
+ response: protos.GenerateContentResponse
+)
+
+
+
+
+
+resolve
+
+View source
+
+
+resolve()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict()
+
+
+Returns the result as a JSON-compatible dict.
+
+Note: This doesn't capture the iterator state when streaming, it only captures the accumulated
+`GenerateContentResponse` fields.
+
+```
+>>> import json
+>>> response = model.generate_content('Hello?')
+>>> json.dumps(response.to_dict())
+```
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerationConfig.md b/docs/api/google/generativeai/types/GenerationConfig.md
new file mode 100644
index 000000000..0a51b7978
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfig.md
@@ -0,0 +1,255 @@
+description: A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content
.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.GenerationConfig
+
+
+
+
+
+
+
+A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content
.
+
+
+ View aliases
+
+Main aliases
+
`google.generativeai.GenerationConfig`
+
+
+
+
+google.generativeai.types.GenerationConfig(
+ candidate_count: (int | None) = None,
+ stop_sequences: (Iterable[str] | None) = None,
+ max_output_tokens: (int | None) = None,
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None,
+ response_mime_type: (str | None) = None,
+ response_schema: (protos.Schema | Mapping[str, Any] | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`candidate_count`
+ |
+
+ Number of generated responses to return.
+ |
+
+
+`stop_sequences`
+ |
+
+ The set of character sequences (up
+to 5) that will stop output generation. If
+specified, the API will stop at the first
+appearance of a stop sequence. The stop sequence
+will not be included as part of the response.
+ |
+
+
+`max_output_tokens`
+ |
+
+ The maximum number of tokens to include in a
+candidate.
+
+If unset, this will default to output_token_limit specified
+in the model's specification.
+ |
+
+
+`temperature`
+ |
+
+ Controls the randomness of the output. Note: The
+default value varies by model, see the Model.temperature
+attribute of the `Model` returned by the `genai.get_model`
+function.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model.
+ |
+
+
+`top_p`
+ |
+
+ Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model, see the
+Model.top_p attribute of the `Model` returned by the
+`genai.get_model` function.
+ |
+
+
+`top_k`
+ |
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of `top_k` most probable
+tokens. Defaults to 40.
+
+Note: The default value varies by model, see the
+Model.top_k attribute of the `Model` returned by the
+`genai.get_model` function.
+ |
+
+
+`response_mime_type`
+ |
+
+ Optional. Output response mimetype of the generated candidate text.
+
+Supported mimetype:
+ `text/plain`: (default) Text output.
+ `application/json`: JSON response in the candidates.
+ |
+
+
+`response_schema`
+ |
+
+ Optional. Specifies the format of the JSON requested if response_mime_type is
+`application/json`.
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+candidate_count
+ |
+
+`None`
+ |
+
+
+max_output_tokens
+ |
+
+`None`
+ |
+
+
+response_mime_type
+ |
+
+`None`
+ |
+
+
+response_schema
+ |
+
+`None`
+ |
+
+
+stop_sequences
+ |
+
+`None`
+ |
+
+
+temperature
+ |
+
+`None`
+ |
+
+
+top_k
+ |
+
+`None`
+ |
+
+
+top_p
+ |
+
+`None`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerationConfigDict.md b/docs/api/google/generativeai/types/GenerationConfigDict.md
new file mode 100644
index 000000000..9cd508a9a
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfigDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.GenerationConfigDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/GenerationConfigType.md b/docs/api/google/generativeai/types/GenerationConfigType.md
new file mode 100644
index 000000000..fcd42bdf9
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfigType.md
@@ -0,0 +1,25 @@
+
+
+
+
+
+# google.generativeai.types.GenerationConfigType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+GenerationConfigType = Union[
+ google.generativeai.protos.GenerationConfig
,
+ google.generativeai.types.GenerationConfigDict
,
+ google.generativeai.types.GenerationConfig
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmBlockThreshold.md b/docs/api/google/generativeai/types/HarmBlockThreshold.md
new file mode 100644
index 000000000..bea677e9b
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmBlockThreshold.md
@@ -0,0 +1,722 @@
+description: Block at and beyond a specified harm probability.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.HarmBlockThreshold
+
+
+
+
+
+
+
+Block at and beyond a specified harm probability.
+
+
+ View aliases
+
+Main aliases
+
`google.generativeai.protos.SafetySetting.HarmBlockThreshold`
+
+
+
+
+google.generativeai.types.HarmBlockThreshold(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`HARM_BLOCK_THRESHOLD_UNSPECIFIED`
+ |
+
+`0`
+
+Threshold is unspecified.
+ |
+
+
+`BLOCK_LOW_AND_ABOVE`
+ |
+
+`1`
+
+Content with NEGLIGIBLE will be allowed.
+ |
+
+
+`BLOCK_MEDIUM_AND_ABOVE`
+ |
+
+`2`
+
+Content with NEGLIGIBLE and LOW will be
+allowed.
+ |
+
+
+`BLOCK_ONLY_HIGH`
+ |
+
+`3`
+
+Content with NEGLIGIBLE, LOW, and MEDIUM will
+be allowed.
+ |
+
+
+`BLOCK_NONE`
+ |
+
+`4`
+
+All content will be allowed.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+BLOCK_LOW_AND_ABOVE
+ |
+
+``
+ |
+
+
+BLOCK_MEDIUM_AND_ABOVE
+ |
+
+``
+ |
+
+
+BLOCK_NONE
+ |
+
+``
+ |
+
+
+BLOCK_ONLY_HIGH
+ |
+
+``
+ |
+
+
+HARM_BLOCK_THRESHOLD_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmCategory.md b/docs/api/google/generativeai/types/HarmCategory.md
new file mode 100644
index 000000000..a22f1f060
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmCategory.md
@@ -0,0 +1,657 @@
+description: Harm Categories supported by the gemini-family model
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.HarmCategory
+
+
+
+
+
+
+
+Harm Categories supported by the gemini-family model
+
+
+google.generativeai.types.HarmCategory(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+HARM_CATEGORY_DANGEROUS_CONTENT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_HARASSMENT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_HATE_SPEECH
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_SEXUALLY_EXPLICIT
+ |
+
+``
+ |
+
+
+HARM_CATEGORY_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmProbability.md b/docs/api/google/generativeai/types/HarmProbability.md
new file mode 100644
index 000000000..d383b3530
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmProbability.md
@@ -0,0 +1,724 @@
+description: The probability that a piece of content is harmful.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.HarmProbability
+
+
+
+
+
+
+
+The probability that a piece of content is harmful.
+
+
+ View aliases
+
+Main aliases
+
`google.generativeai.protos.SafetyRating.HarmProbability`
+
+
+
+
+google.generativeai.types.HarmProbability(
+ *args, **kwds
+)
+
+
+
+
+
+
+The classification system gives the probability of the content
+being unsafe. This does not indicate the severity of harm for a
+piece of content.
+
+
+
+
+Values |
+
+
+
+`HARM_PROBABILITY_UNSPECIFIED`
+ |
+
+`0`
+
+Probability is unspecified.
+ |
+
+
+`NEGLIGIBLE`
+ |
+
+`1`
+
+Content has a negligible chance of being
+unsafe.
+ |
+
+
+`LOW`
+ |
+
+`2`
+
+Content has a low chance of being unsafe.
+ |
+
+
+`MEDIUM`
+ |
+
+`3`
+
+Content has a medium chance of being unsafe.
+ |
+
+
+`HIGH`
+ |
+
+`4`
+
+Content has a high chance of being unsafe.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+HARM_PROBABILITY_UNSPECIFIED
+ |
+
+``
+ |
+
+
+HIGH
+ |
+
+``
+ |
+
+
+LOW
+ |
+
+``
+ |
+
+
+MEDIUM
+ |
+
+``
+ |
+
+
+NEGLIGIBLE
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/IncompleteIterationError.md b/docs/api/google/generativeai/types/IncompleteIterationError.md
new file mode 100644
index 000000000..03eaced0f
--- /dev/null
+++ b/docs/api/google/generativeai/types/IncompleteIterationError.md
@@ -0,0 +1,27 @@
+description: Common base class for all non-exit exceptions.
+
+
+
+
+
+
+# google.generativeai.types.IncompleteIterationError
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/MessageDict.md b/docs/api/google/generativeai/types/MessageDict.md
new file mode 100644
index 000000000..2b9d0d5ba
--- /dev/null
+++ b/docs/api/google/generativeai/types/MessageDict.md
@@ -0,0 +1,27 @@
+description: A dict representation of a protos.Message
.
+
+
+
+
+
+
+# google.generativeai.types.MessageDict
+
+
+
+
+
+
+
+A dict representation of a protos.Message
.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/MessageOptions.md b/docs/api/google/generativeai/types/MessageOptions.md
new file mode 100644
index 000000000..9e7ad68e7
--- /dev/null
+++ b/docs/api/google/generativeai/types/MessageOptions.md
@@ -0,0 +1,25 @@
+
+
+
+
+
+# google.generativeai.types.MessageOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+MessageOptions = Union[
+ str,
+ google.generativeai.types.MessageDict
,
+ google.generativeai.protos.Message
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/MessagePromptDict.md b/docs/api/google/generativeai/types/MessagePromptDict.md
new file mode 100644
index 000000000..51abe73fe
--- /dev/null
+++ b/docs/api/google/generativeai/types/MessagePromptDict.md
@@ -0,0 +1,27 @@
+description: A dict representation of a protos.MessagePrompt
.
+
+
+
+
+
+
+# google.generativeai.types.MessagePromptDict
+
+
+
+
+
+
+
+A dict representation of a protos.MessagePrompt
.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/MessagePromptOptions.md b/docs/api/google/generativeai/types/MessagePromptOptions.md
new file mode 100644
index 000000000..54a0dc48c
--- /dev/null
+++ b/docs/api/google/generativeai/types/MessagePromptOptions.md
@@ -0,0 +1,27 @@
+
+
+
+
+
+# google.generativeai.types.MessagePromptOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+MessagePromptOptions = Union[
+ str,
+ google.generativeai.protos.Message
,
+ Iterable[Union[str, google.generativeai.protos.Message
]],
+ google.generativeai.types.MessagePromptDict
,
+ google.generativeai.protos.MessagePrompt
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/MessagesOptions.md b/docs/api/google/generativeai/types/MessagesOptions.md
new file mode 100644
index 000000000..77c310024
--- /dev/null
+++ b/docs/api/google/generativeai/types/MessagesOptions.md
@@ -0,0 +1,26 @@
+
+
+
+
+
+# google.generativeai.types.MessagesOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+MessagesOptions = Union[
+ str,
+ google.generativeai.types.MessageDict
,
+ google.generativeai.protos.Message
,
+ Iterable[google.generativeai.types.MessageOptions
]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Model.md b/docs/api/google/generativeai/types/Model.md
new file mode 100644
index 000000000..e3c122a06
--- /dev/null
+++ b/docs/api/google/generativeai/types/Model.md
@@ -0,0 +1,205 @@
+description: A dataclass representation of a protos.Model
.
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.Model
+
+
+
+
+
+
+
+A dataclass representation of a protos.Model
.
+
+
+google.generativeai.types.Model(
+ name: str,
+ base_model_id: str,
+ version: str,
+ display_name: str,
+ description: str,
+ input_token_limit: int,
+ output_token_limit: int,
+ supported_generation_methods: list[str],
+ temperature: (float | None) = None,
+ max_temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming
+convention of: "{base_model_id}-{version}". For example: `models/chat-bison-001`.
+ |
+
+
+`base_model_id`
+ |
+
+The base name of the model. For example: `chat-bison`.
+ |
+
+
+`version`
+ |
+
+ The major version number of the model. For example: `001`.
+ |
+
+
+`display_name`
+ |
+
+The human-readable name of the model. E.g. `"Chat Bison"`. The name can be up
+to 128 characters long and can consist of any UTF-8 characters.
+ |
+
+
+`description`
+ |
+
+A short description of the model.
+ |
+
+
+`input_token_limit`
+ |
+
+Maximum number of input tokens allowed for this model.
+ |
+
+
+`output_token_limit`
+ |
+
+Maximum number of output tokens available for this model.
+ |
+
+
+`supported_generation_methods`
+ |
+
+lists which methods are supported by the model. The method
+names are defined as Pascal case strings, such as `generateMessage` which correspond to
+API methods.
+ |
+
+
+`temperature`
+ |
+
+Dataclass field
+ |
+
+
+`max_temperature`
+ |
+
+Dataclass field
+ |
+
+
+`top_p`
+ |
+
+Dataclass field
+ |
+
+
+`top_k`
+ |
+
+Dataclass field
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+max_temperature
+ |
+
+`None`
+ |
+
+
+temperature
+ |
+
+`None`
+ |
+
+
+top_k
+ |
+
+`None`
+ |
+
+
+top_p
+ |
+
+`None`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/ModelsIterable.md b/docs/api/google/generativeai/types/ModelsIterable.md
new file mode 100644
index 000000000..e5e4f8774
--- /dev/null
+++ b/docs/api/google/generativeai/types/ModelsIterable.md
@@ -0,0 +1,23 @@
+
+
+
+
+
+# google.generativeai.types.ModelsIterable
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ModelsIterable = Iterable[
+ google.generativeai.types.Model
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/PartDict.md b/docs/api/google/generativeai/types/PartDict.md
new file mode 100644
index 000000000..ccf40e29e
--- /dev/null
+++ b/docs/api/google/generativeai/types/PartDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.PartDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/PartType.md b/docs/api/google/generativeai/types/PartType.md
new file mode 100644
index 000000000..0490bae6b
--- /dev/null
+++ b/docs/api/google/generativeai/types/PartType.md
@@ -0,0 +1,35 @@
+
+
+
+
+
+# google.generativeai.types.PartType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+PartType = Union[
+ google.generativeai.protos.Part
,
+ google.generativeai.types.PartDict
,
+ google.generativeai.protos.Blob
,
+ google.generativeai.types.BlobDict
,
+ PIL.Image.Image,
+ IPython.core.display.Image,
+ str,
+ google.generativeai.protos.FunctionCall
,
+ google.generativeai.protos.FunctionResponse
,
+ google.generativeai.types.FileDataDict
,
+ google.generativeai.protos.FileData
,
+ google.generativeai.protos.File
,
+ google.generativeai.types.File
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Permission.md b/docs/api/google/generativeai/types/Permission.md
new file mode 100644
index 000000000..22697e497
--- /dev/null
+++ b/docs/api/google/generativeai/types/Permission.md
@@ -0,0 +1,274 @@
+description: A permission to access a resource.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.Permission
+
+
+
+
+
+
+
+A permission to access a resource.
+
+
+google.generativeai.types.Permission(
+ name: str,
+ role: RoleOptions,
+ grantee_type: Optional[GranteeTypeOptions] = None,
+ email_address: Optional[str] = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`name`
+ |
+
+Dataclass field
+ |
+
+
+`role`
+ |
+
+Dataclass field
+ |
+
+
+`grantee_type`
+ |
+
+Dataclass field
+ |
+
+
+`email_address`
+ |
+
+Dataclass field
+ |
+
+
+
+
+
+## Methods
+
+delete
+
+View source
+
+
+delete(
+ client: (glm.PermissionServiceClient | None) = None
+) -> None
+
+
+Delete permission (self).
+
+
+delete_async
+
+View source
+
+
+delete_async(
+ client=None
+)
+
+
+This is the async version of Permission.delete
.
+
+
+get
+
+View source
+
+
+@classmethod
+get(
+ name: str, client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the permission to get.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+Requested permission as an instance of `Permission`.
+ |
+
+
+
+
+
+
+get_async
+
+View source
+
+
+get_async(
+ name, client=None
+)
+
+
+This is the async version of Permission.get
.
+
+
+to_dict
+
+View source
+
+
+to_dict() -> dict[str, Any]
+
+
+
+
+
+update
+
+View source
+
+
+update(
+ updates: dict[str, Any],
+ client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Update a list of fields for a specified permission.
+
+
+
+
+
+Args |
+
+
+
+`updates`
+ |
+
+The list of fields to update.
+Currently only `role` is supported as an update path.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+`Permission` object with specified updates.
+ |
+
+
+
+
+
+
+update_async
+
+View source
+
+
+update_async(
+ updates, client=None
+)
+
+
+This is the async version of Permission.update
.
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+email_address
+ |
+
+`None`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/Permissions.md b/docs/api/google/generativeai/types/Permissions.md
new file mode 100644
index 000000000..3f889fd17
--- /dev/null
+++ b/docs/api/google/generativeai/types/Permissions.md
@@ -0,0 +1,386 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.Permissions
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.Permissions(
+ parent
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`parent`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+create
+
+View source
+
+
+create(
+ role: RoleOptions,
+ grantee_type: Optional[GranteeTypeOptions] = None,
+ email_address: Optional[str] = None,
+ client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Create a new permission on a resource (self).
+
+
+
+
+
+Args |
+
+
+
+`parent`
+ |
+
+The resource name of the parent resource in which the permission will be listed.
+ |
+
+
+`role`
+ |
+
+role that will be granted by the permission.
+ |
+
+
+`grantee_type`
+ |
+
+The type of the grantee for the permission.
+ |
+
+
+`email_address`
+ |
+
+The email address of the grantee.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+`Permission` object with specified parent, role, grantee type, and email address.
+ |
+
+
+
+
+
+
+
+
+
+Raises |
+
+
+
+`ValueError`
+ |
+
+When email_address is specified and grantee_type is set to EVERYONE.
+ |
+
+
+`ValueError`
+ |
+
+When email_address is not specified and grantee_type is not set to EVERYONE.
+ |
+
+
+
+
+
+create_async
+
+View source
+
+
+create_async(
+ role, grantee_type=None, email_address=None, client=None
+)
+
+
+This is the async version of `PermissionAdapter.create_permission`.
+
+
+get
+
+View source
+
+
+@classmethod
+get(
+ name: str
+) -> Permission
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the permission to get.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+Requested permission as an instance of `Permission`.
+ |
+
+
+
+
+
+
+get_async
+
+View source
+
+
+get_async(
+ name
+)
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+The name of the permission to get.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+Requested permission as an instance of `Permission`.
+ |
+
+
+
+
+
+
+list
+
+View source
+
+
+list(
+ page_size: Optional[int] = None,
+ client: (glm.PermissionServiceClient | None) = None
+) -> Iterable[Permission]
+
+
+List `Permission`s enforced on a resource (self).
+
+
+
+
+
+Args |
+
+
+
+`parent`
+ |
+
+The resource name of the parent resource in which the permission will be listed.
+ |
+
+
+`page_size`
+ |
+
+The maximum number of permissions to return (per page). The service may return fewer permissions.
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+Paginated list of `Permission` objects.
+ |
+
+
+
+
+
+
+list_async
+
+View source
+
+
+list_async(
+ page_size=None, client=None
+)
+
+
+This is the async version of `PermissionAdapter.list_permissions`.
+
+
+transfer_ownership
+
+View source
+
+
+transfer_ownership(
+ email_address: str, client: (glm.PermissionServiceClient | None) = None
+) -> None
+
+
+Transfer ownership of a resource (self) to a new owner.
+
+
+
+
+
+Args |
+
+
+
+`name`
+ |
+
+Name of the resource to transfer ownership.
+ |
+
+
+`email_address`
+ |
+
+Email address of the new owner.
+ |
+
+
+
+
+
+transfer_ownership_async
+
+View source
+
+
+transfer_ownership_async(
+ email_address, client=None
+)
+
+
+This is the async version of `PermissionAdapter.transfer_ownership`.
+
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/RequestOptions.md b/docs/api/google/generativeai/types/RequestOptions.md
new file mode 100644
index 000000000..1e3cfd324
--- /dev/null
+++ b/docs/api/google/generativeai/types/RequestOptions.md
@@ -0,0 +1,209 @@
+description: Request options
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.RequestOptions
+
+
+
+
+
+
+
+Request options
+
+
+google.generativeai.types.RequestOptions(
+ *,
+ retry: (google.api_core.retry.Retry | None) = None,
+ timeout: (int | float | google.api_core.timeout.TimeToDeadlineTimeout | None) = None
+)
+
+
+
+
+
+
+
+```
+>>> import google.generativeai as genai
+>>> from google.generativeai.types import RequestOptions
+>>> from google.api_core import retry
+>>>
+>>> model = genai.GenerativeModel()
+>>> response = model.generate_content('Hello',
+... request_options=RequestOptions(
+... retry=retry.Retry(initial=10, multiplier=2, maximum=60, timeout=300)))
+>>> response = model.generate_content('Hello',
+... request_options=RequestOptions(timeout=600))
+```
+
+
+
+
+Args |
+
+
+
+`retry`
+ |
+
+Refer to [retry docs](https://googleapis.dev/python/google-api-core/latest/retry.html) for details.
+ |
+
+
+`timeout`
+ |
+
+In seconds (or provide a [TimeToDeadlineTimeout](https://googleapis.dev/python/google-api-core/latest/timeout.html) object).
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`retry`
+ |
+
+Dataclass field
+ |
+
+
+`timeout`
+ |
+
+Dataclass field
+ |
+
+
+
+
+
+## Methods
+
+get
+
+
+get(
+ key, default=None
+)
+
+
+D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+
+
+items
+
+
+items()
+
+
+D.items() -> a set-like object providing a view on D's items
+
+
+keys
+
+
+keys()
+
+
+D.keys() -> a set-like object providing a view on D's keys
+
+
+values
+
+
+values()
+
+
+D.values() -> an object providing a view on D's values
+
+
+__contains__
+
+
+__contains__(
+ key
+)
+
+
+
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ item
+)
+
+
+
+
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+__len__
+
+View source
+
+
+__len__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/RequestOptionsType.md b/docs/api/google/generativeai/types/RequestOptionsType.md
new file mode 100644
index 000000000..aeee187f1
--- /dev/null
+++ b/docs/api/google/generativeai/types/RequestOptionsType.md
@@ -0,0 +1,24 @@
+
+
+
+
+
+# google.generativeai.types.RequestOptionsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+RequestOptionsType = Union[
+ google.generativeai.types.RequestOptions
,
+ google.generativeai.types.helper_types.RequestOptionsDict
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ResponseDict.md b/docs/api/google/generativeai/types/ResponseDict.md
new file mode 100644
index 000000000..2a5119a22
--- /dev/null
+++ b/docs/api/google/generativeai/types/ResponseDict.md
@@ -0,0 +1,27 @@
+description: A dict representation of a protos.GenerateMessageResponse
.
+
+
+
+
+
+
+# google.generativeai.types.ResponseDict
+
+
+
+
+
+
+
+A dict representation of a protos.GenerateMessageResponse
.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetyFeedbackDict.md b/docs/api/google/generativeai/types/SafetyFeedbackDict.md
new file mode 100644
index 000000000..121d94e3a
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetyFeedbackDict.md
@@ -0,0 +1,63 @@
+description: Safety feedback for an entire request.
+
+
+
+
+
+
+# google.generativeai.types.SafetyFeedbackDict
+
+
+
+
+
+
+
+Safety feedback for an entire request.
+
+
+
+This field is populated if content in the input and/or response
+is blocked due to safety settings. SafetyFeedback may not exist
+for every HarmCategory. Each SafetyFeedback will return the
+safety settings used by the request as well as the lowest
+HarmProbability that should be allowed in order to return a
+result.
+
+
+
+
+
+
+Attributes |
+
+
+
+`rating`
+ |
+
+`google.ai.generativelanguage.SafetyRating`
+
+Safety rating evaluated from content.
+ |
+
+
+`setting`
+ |
+
+`google.ai.generativelanguage.SafetySetting`
+
+Safety settings applied to the request.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetyRatingDict.md b/docs/api/google/generativeai/types/SafetyRatingDict.md
new file mode 100644
index 000000000..bc10071b5
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetyRatingDict.md
@@ -0,0 +1,73 @@
+description: Safety rating for a piece of content.
+
+
+
+
+
+
+# google.generativeai.types.SafetyRatingDict
+
+
+
+
+
+
+
+Safety rating for a piece of content.
+
+
+
+The safety rating contains the category of harm and the harm
+probability level in that category for a piece of content.
+Content is classified for safety across a number of harm
+categories and the probability of the harm classification is
+included here.
+
+
+
+
+
+
+Attributes |
+
+
+
+`category`
+ |
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this rating.
+ |
+
+
+`probability`
+ |
+
+`google.ai.generativelanguage.SafetyRating.HarmProbability`
+
+Required. The probability of harm for this
+content.
+ |
+
+
+`blocked`
+ |
+
+`bool`
+
+Was this content blocked because of this
+rating?
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetySettingDict.md b/docs/api/google/generativeai/types/SafetySettingDict.md
new file mode 100644
index 000000000..fd15a443b
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetySettingDict.md
@@ -0,0 +1,60 @@
+description: Safety setting, affecting the safety-blocking behavior.
+
+
+
+
+
+
+# google.generativeai.types.SafetySettingDict
+
+
+
+
+
+
+
+Safety setting, affecting the safety-blocking behavior.
+
+
+
+Passing a safety setting for a category changes the allowed
+probability that content is blocked.
+
+
+
+
+
+
+Attributes |
+
+
+
+`category`
+ |
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this setting.
+ |
+
+
+`threshold`
+ |
+
+`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
+
+Required. Controls the probability threshold
+at which harm is blocked.
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Status.md b/docs/api/google/generativeai/types/Status.md
new file mode 100644
index 000000000..c306a9ded
--- /dev/null
+++ b/docs/api/google/generativeai/types/Status.md
@@ -0,0 +1,55 @@
+description: A ProtocolMessage
+
+
+
+
+
+
+# google.generativeai.types.Status
+
+
+
+
+
+
+
+A ProtocolMessage
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`code`
+ |
+
+`int32 code`
+ |
+
+
+`details`
+ |
+
+`repeated Any details`
+ |
+
+
+`message`
+ |
+
+`string message`
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/StopCandidateException.md b/docs/api/google/generativeai/types/StopCandidateException.md
new file mode 100644
index 000000000..56484a013
--- /dev/null
+++ b/docs/api/google/generativeai/types/StopCandidateException.md
@@ -0,0 +1,27 @@
+description: Common base class for all non-exit exceptions.
+
+
+
+
+
+
+# google.generativeai.types.StopCandidateException
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/StrictContentType.md b/docs/api/google/generativeai/types/StrictContentType.md
new file mode 100644
index 000000000..cfd497595
--- /dev/null
+++ b/docs/api/google/generativeai/types/StrictContentType.md
@@ -0,0 +1,24 @@
+
+
+
+
+
+# google.generativeai.types.StrictContentType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+StrictContentType = Union[
+ google.generativeai.protos.Content
,
+ google.generativeai.types.ContentDict
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Tool.md b/docs/api/google/generativeai/types/Tool.md
new file mode 100644
index 000000000..ac49e31da
--- /dev/null
+++ b/docs/api/google/generativeai/types/Tool.md
@@ -0,0 +1,107 @@
+description: A wrapper for protos.Tool
, Contains a collection of related FunctionDeclaration objects.
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.Tool
+
+
+
+
+
+
+
+A wrapper for protos.Tool
, Contains a collection of related `FunctionDeclaration` objects.
+
+
+google.generativeai.types.Tool(
+ function_declarations: (Iterable[FunctionDeclarationType] | None) = None,
+ code_execution: (protos.CodeExecution | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`code_execution`
+ |
+
+
+ |
+
+
+`function_declarations`
+ |
+
+
+ |
+
+
+
+
+
+## Methods
+
+to_proto
+
+View source
+
+
+to_proto()
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> (protos.FunctionResponse | None)
+
+
+Call self as a function.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ name: (str | protos.FunctionCall)
+) -> (FunctionDeclaration | protos.FunctionDeclaration)
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ToolDict.md b/docs/api/google/generativeai/types/ToolDict.md
new file mode 100644
index 000000000..01814ceb3
--- /dev/null
+++ b/docs/api/google/generativeai/types/ToolDict.md
@@ -0,0 +1,27 @@
+description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+
+
+
+
+# google.generativeai.types.ToolDict
+
+
+
+
+
+
+
+dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list.
+
+
+ For example: dict(one=1, two=2)
+
diff --git a/docs/api/google/generativeai/types/ToolsType.md b/docs/api/google/generativeai/types/ToolsType.md
new file mode 100644
index 000000000..9b9430a65
--- /dev/null
+++ b/docs/api/google/generativeai/types/ToolsType.md
@@ -0,0 +1,31 @@
+
+
+
+
+
+# google.generativeai.types.ToolsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ToolsType = Union[
+ Iterable[Union[google.generativeai.types.Tool
, google.generativeai.protos.Tool
, google.generativeai.types.ToolDict
, Iterable[google.generativeai.types.FunctionDeclarationType
], google.generativeai.types.FunctionDeclaration
, google.generativeai.protos.FunctionDeclaration
, dict[str, Any], Callable[..., Any]]],
+ google.generativeai.types.Tool
,
+ google.generativeai.protos.Tool
,
+ google.generativeai.types.ToolDict
,
+ Iterable[google.generativeai.types.FunctionDeclarationType
],
+ google.generativeai.types.FunctionDeclaration
,
+ google.generativeai.protos.FunctionDeclaration
,
+ dict[str, Any],
+ Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModel.md b/docs/api/google/generativeai/types/TunedModel.md
new file mode 100644
index 000000000..dfe35d4b6
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModel.md
@@ -0,0 +1,272 @@
+description: A dataclass representation of a protos.TunedModel
.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.TunedModel
+
+
+
+
+
+
+
+A dataclass representation of a protos.TunedModel
.
+
+
+google.generativeai.types.TunedModel(
+ name: (str | None) = None,
+ source_model: (str | None) = None,
+ base_model: (str | None) = None,
+ display_name: str = '',
+ description: str = '',
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (float | None) = None,
+ state: TunedModelState = TunedModelState.STATE_UNSPECIFIED,
+ create_time: (datetime.datetime | None) = None,
+ update_time: (datetime.datetime | None) = None,
+ tuning_task: (TuningTask | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`permissions`
+ |
+
+
+ |
+
+
+`name`
+ |
+
+Dataclass field
+ |
+
+
+`source_model`
+ |
+
+Dataclass field
+ |
+
+
+`base_model`
+ |
+
+Dataclass field
+ |
+
+
+`display_name`
+ |
+
+Dataclass field
+ |
+
+
+`description`
+ |
+
+Dataclass field
+ |
+
+
+`temperature`
+ |
+
+Dataclass field
+ |
+
+
+`top_p`
+ |
+
+Dataclass field
+ |
+
+
+`top_k`
+ |
+
+Dataclass field
+ |
+
+
+`state`
+ |
+
+Dataclass field
+ |
+
+
+`create_time`
+ |
+
+Dataclass field
+ |
+
+
+`update_time`
+ |
+
+Dataclass field
+ |
+
+
+`tuning_task`
+ |
+
+Dataclass field
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+base_model
+ |
+
+`None`
+ |
+
+
+create_time
+ |
+
+`None`
+ |
+
+
+description
+ |
+
+`''`
+ |
+
+
+display_name
+ |
+
+`''`
+ |
+
+
+name
+ |
+
+`None`
+ |
+
+
+source_model
+ |
+
+`None`
+ |
+
+
+state
+ |
+
+``
+ |
+
+
+temperature
+ |
+
+`None`
+ |
+
+
+top_k
+ |
+
+`None`
+ |
+
+
+top_p
+ |
+
+`None`
+ |
+
+
+tuning_task
+ |
+
+`None`
+ |
+
+
+update_time
+ |
+
+`None`
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModelNameOptions.md b/docs/api/google/generativeai/types/TunedModelNameOptions.md
new file mode 100644
index 000000000..3f52c4a87
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModelNameOptions.md
@@ -0,0 +1,25 @@
+
+
+
+
+
+# google.generativeai.types.TunedModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+TunedModelNameOptions = Union[
+ str,
+ google.generativeai.types.TunedModel
,
+ google.generativeai.protos.TunedModel
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModelState.md b/docs/api/google/generativeai/types/TunedModelState.md
new file mode 100644
index 000000000..26b0d4bfe
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModelState.md
@@ -0,0 +1,703 @@
+description: The state of the tuned model.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# google.generativeai.types.TunedModelState
+
+
+
+
+
+
+
+The state of the tuned model.
+
+
+ View aliases
+
+Main aliases
+
`google.generativeai.protos.TunedModel.State`
+
+
+
+
+google.generativeai.types.TunedModelState(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+`STATE_UNSPECIFIED`
+ |
+
+`0`
+
+The default value. This value is unused.
+ |
+
+
+`CREATING`
+ |
+
+`1`
+
+The model is being created.
+ |
+
+
+`ACTIVE`
+ |
+
+`2`
+
+The model is ready to be used.
+ |
+
+
+`FAILED`
+ |
+
+`3`
+
+The model failed to be created.
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+`denominator`
+ |
+
+the denominator of a rational number in lowest terms
+ |
+
+
+`imag`
+ |
+
+the imaginary part of a complex number
+ |
+
+
+`numerator`
+ |
+
+the numerator of a rational number in lowest terms
+ |
+
+
+`real`
+ |
+
+the real part of a complex number
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return integer ratio.
+
+Return a pair of integers, whose ratio is exactly equal to the original int
+and with a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+ACTIVE
+ |
+
+``
+ |
+
+
+CREATING
+ |
+
+``
+ |
+
+
+FAILED
+ |
+
+``
+ |
+
+
+STATE_UNSPECIFIED
+ |
+
+``
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/TypedDict.md b/docs/api/google/generativeai/types/TypedDict.md
new file mode 100644
index 000000000..00a68a8e3
--- /dev/null
+++ b/docs/api/google/generativeai/types/TypedDict.md
@@ -0,0 +1,73 @@
+description: A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+
+
+
+
+
+# google.generativeai.types.TypedDict
+
+
+
+
+
+
+
+A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+
+
+google.generativeai.types.TypedDict(
+ typename, fields, /, *, total=True, **kwargs
+)
+
+
+
+
+
+
+TypedDict creates a dictionary type such that a type checker will expect all
+instances to have a certain set of keys, where each key is
+associated with a value of a consistent type. This expectation
+is not checked at runtime.
+
+Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+The type info can be accessed via the Point2D.__annotations__ dict, and
+the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+TypedDict supports an additional equivalent form::
+
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+By default, all keys must be present in a TypedDict. It is possible
+to override this by specifying totality::
+
+ class Point2D(TypedDict, total=False):
+ x: int
+ y: int
+
+This means that a Point2D TypedDict can have any of the keys omitted. A type
+checker is only expected to support a literal False or True as the value of
+the total argument. True is the default, and makes all items defined in the
+class body be required.
+
+The Required and NotRequired special forms can also be used to mark
+individual keys as being required or not required::
+
+ class Point2D(TypedDict):
+ x: int # the "x" key must always be present (Required is the default)
+ y: NotRequired[int] # the "y" key can be omitted
+
+See PEP 655 for more details on Required and NotRequired.
\ No newline at end of file
diff --git a/docs/api/google/generativeai/types/get_default_file_client.md b/docs/api/google/generativeai/types/get_default_file_client.md
new file mode 100644
index 000000000..03aff0033
--- /dev/null
+++ b/docs/api/google/generativeai/types/get_default_file_client.md
@@ -0,0 +1,30 @@
+
+
+
+
+
+# google.generativeai.types.get_default_file_client
+
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.get_default_file_client() -> glm.FilesServiceClient
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/to_file_data.md b/docs/api/google/generativeai/types/to_file_data.md
new file mode 100644
index 000000000..5e263eb3b
--- /dev/null
+++ b/docs/api/google/generativeai/types/to_file_data.md
@@ -0,0 +1,32 @@
+
+
+
+
+
+# google.generativeai.types.to_file_data
+
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.to_file_data(
+ file_data: FileDataType
+)
+
+
+
+
+
diff --git a/docs/api/google/generativeai/update_tuned_model.md b/docs/api/google/generativeai/update_tuned_model.md
new file mode 100644
index 000000000..b1f1dc9eb
--- /dev/null
+++ b/docs/api/google/generativeai/update_tuned_model.md
@@ -0,0 +1,38 @@
+description: Calls the API to push updates to a specified tuned model where only certain attributes are updatable.
+
+
+
+
+
+
+# google.generativeai.update_tuned_model
+
+
+
+
+
+
+
+Calls the API to push updates to a specified tuned model where only certain attributes are updatable.
+
+
+
+google.generativeai.update_tuned_model(
+ tuned_model: (str | protos.TunedModel),
+ updates: (dict[str, Any] | None) = None,
+ *,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModel
+
+
+
+
+
diff --git a/docs/api/google/generativeai/upload_file.md b/docs/api/google/generativeai/upload_file.md
new file mode 100644
index 000000000..7a18ba41a
--- /dev/null
+++ b/docs/api/google/generativeai/upload_file.md
@@ -0,0 +1,105 @@
+description: Calls the API to upload a file using a supported file service.
+
+
+
+
+
+
+# google.generativeai.upload_file
+
+
+
+
+
+
+
+Calls the API to upload a file using a supported file service.
+
+
+
+google.generativeai.upload_file(
+ path: (str | pathlib.Path | os.PathLike),
+ *,
+ mime_type: (str | None) = None,
+ name: (str | None) = None,
+ display_name: (str | None) = None,
+ resumable: bool = True
+) -> file_types.File
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+`path`
+ |
+
+The path to the file to be uploaded.
+ |
+
+
+`mime_type`
+ |
+
+The MIME type of the file. If not provided, it will be
+inferred from the file extension.
+ |
+
+
+`name`
+ |
+
+The name of the file in the destination (e.g., 'files/sample-image').
+If not provided, a system generated ID will be created.
+ |
+
+
+`display_name`
+ |
+
+Optional display name of the file.
+ |
+
+
+`resumable`
+ |
+
+Whether to use the resumable upload protocol. By default, this is enabled.
+See details at
+https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.MediaFileUpload-class.html#resumable
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+`file_types.File`
+ |
+
+The response of the uploaded file.
+ |
+
+
+
From 0f8f139e6a77b95aad58f001202cf82cdc230bf5 Mon Sep 17 00:00:00 2001
From: Guillaume Vernade
Date: Mon, 22 Jul 2024 18:26:05 +0000
Subject: [PATCH 17/90] Using the `GEMINI_API_KEY` by default instead of the
`GOOGLE_API_KEY` one (#418)
* Using the GEMINI_API_KEY by default instead of the GOOGLE_API_KEY one
The Google API key name can be misleading since it is not an overall key for all Google APIs, but only for the Gemini ones.
* Formatting
* Update google/generativeai/client.py
* revert elif
---------
Co-authored-by: Mark Daoust
---
README.md | 2 +-
google/generativeai/client.py | 12 +++++++++---
google/generativeai/types/discuss_types.py | 2 +-
3 files changed, 11 insertions(+), 5 deletions(-)
diff --git a/README.md b/README.md
index c0200f5b2..99d387bd7 100644
--- a/README.md
+++ b/README.md
@@ -27,7 +27,7 @@ See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbo
import google.generativeai as genai
import os
-genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
+genai.configure(api_key=os.environ["GEMINI_API_KEY"])
```
3. Create a model and run a prompt.
diff --git a/google/generativeai/client.py b/google/generativeai/client.py
index 5d7b6996b..7e2193890 100644
--- a/google/generativeai/client.py
+++ b/google/generativeai/client.py
@@ -132,7 +132,8 @@ def configure(
"""Initializes default client configurations using specified parameters or environment variables.
If no API key has been provided (either directly, or on `client_options`) and the
- `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
+ `GEMINI_API_KEY` environment variable is set, it will be used as the API key. If not,
+ if the `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in
`google.ai.generativelanguage` for details on the other arguments.
@@ -141,8 +142,8 @@ def configure(
transport: A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
api_key: The API-Key to use when creating the default clients (each service uses
a separate client). This is a shortcut for `client_options={"api_key": api_key}`.
- If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be
- used.
+ If omitted, and the `GEMINI_API_KEY` or the `GOOGLE_API_KEY` environment variable
+ are set, they will be used in this order of priority.
default_metadata: Default (key, value) metadata pairs to send with every request.
when using `transport="rest"` these are sent as HTTP headers.
"""
@@ -162,6 +163,11 @@ def configure(
if api_key is None:
# If no key is provided explicitly, attempt to load one from the
# environment.
+ api_key = os.getenv("GEMINI_API_KEY")
+
+ if api_key is None:
+ # If the GEMINI_API_KEY doesn't exist, attempt to load the
+ # GOOGLE_API_KEY from the environment.
api_key = os.getenv("GOOGLE_API_KEY")
client_options.api_key = api_key
diff --git a/google/generativeai/types/discuss_types.py b/google/generativeai/types/discuss_types.py
index a538da65c..05ad262f3 100644
--- a/google/generativeai/types/discuss_types.py
+++ b/google/generativeai/types/discuss_types.py
@@ -121,7 +121,7 @@ class ChatResponse(abc.ABC):
```
import google.generativeai as genai
- genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
+ genai.configure(api_key=os.environ['GEMINI_API_KEY'])
response = genai.chat(messages=["Hello."])
print(response.last) # 'Hello! What can I help you with?'
From 5b31be7ff74aa0e6eb41a13d619ad8f116a4e1fd Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Mon, 22 Jul 2024 17:18:05 -0700
Subject: [PATCH 18/90] Add REST embeddings & system_instruction sample (#429)
* Add REST embeddings sample
* Add system_instruction shell script as well
* Update region tags
---
samples/rest/embeddings.sh | 32 ++++++++++++++++++++++++++++++
samples/rest/system_instruction.sh | 13 ++++++++++++
2 files changed, 45 insertions(+)
create mode 100644 samples/rest/embeddings.sh
create mode 100644 samples/rest/system_instruction.sh
diff --git a/samples/rest/embeddings.sh b/samples/rest/embeddings.sh
new file mode 100644
index 000000000..26fa11d44
--- /dev/null
+++ b/samples/rest/embeddings.sh
@@ -0,0 +1,32 @@
+set -eu
+
+echo "[START embed_content]"
+# [START embed_content]
+curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "Hello world"}]}, }' 2> /dev/null | head
+# [END embed_content]
+
+echo "[START batch_embed_contents]"
+# [START batch_embed_contents]
+curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:batchEmbedContents?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"requests": [{
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "What is the meaning of life?"}]}, },
+ {
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "How much wood would a woodchuck chuck?"}]}, },
+ {
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "How does the brain work?"}]}, }, ]}' 2> /dev/null | grep -C 5 values
+# [END batch_embed_contents]
\ No newline at end of file
diff --git a/samples/rest/system_instruction.sh b/samples/rest/system_instruction.sh
new file mode 100644
index 000000000..6a32c8f58
--- /dev/null
+++ b/samples/rest/system_instruction.sh
@@ -0,0 +1,13 @@
+set -eu
+
+echo "[START system_instruction]"
+# [START system_instruction]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{ "system_instruction": {
+ "parts":
+ { "text": "You are a cat. Your name is Neko."}},
+ "contents": {
+ "parts": {
+ "text": "Hello there"}}}'
+# [END system_instruction]
\ No newline at end of file
From d3ca154c589b0f8ac547863dce61bded2f4cfc71 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Mon, 22 Jul 2024 17:18:14 -0700
Subject: [PATCH 19/90] `text_generation` samples for shell (#430)
* Start on text_generation samples for shell
* Add example for one image in text gen
* Add streaming example for one image
* Adding rest of text generation examples
* change to gemini-1.5-flash
* Add updates to text generation scripts
* Using file api to upload audio and video
* Delete audio_output.txt
* Debugged audio example
* Uploading videos now working for text generation
* Delete file_info.json
* Remove stray tag.
---------
Co-authored-by: Mark Daoust
---
samples/rest/text_generation.sh | 247 ++++++++++++++++++++++++++++++++
1 file changed, 247 insertions(+)
create mode 100644 samples/rest/text_generation.sh
diff --git a/samples/rest/text_generation.sh b/samples/rest/text_generation.sh
new file mode 100644
index 000000000..fc2d7b9a0
--- /dev/null
+++ b/samples/rest/text_generation.sh
@@ -0,0 +1,247 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+echo "[START text_gen_text_only_prompt]"
+# [START text_gen_text_only_prompt]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[{"text": "Write a story about a magic backpack."}]
+ }]
+ }' 2> /dev/null
+# [END text_gen_text_only_prompt]
+
+echo "[START text_gen_text_only_prompt_streaming]"
+# [START text_gen_text_only_prompt_streaming]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}" \
+ -H 'Content-Type: application/json' \
+ --no-buffer \
+ -d '{ "contents":[{"parts":[{"text": "Write a story about a magic backpack."}]}]}'
+# [END text_gen_text_only_prompt_streaming]
+
+echo "[START text_gen_multimodal_one_image_prompt]"
+# [START text_gen_multimodal_one_image_prompt]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }]
+ }' 2> /dev/null
+# [END text_gen_multimodal_one_image_prompt]
+
+echo "[START text_gen_multimodal_one_image_prompt_streaming]"
+# [START text_gen_multimodal_one_image_prompt_streaming]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }]
+ }' 2> /dev/null
+# [END text_gen_multimodal_one_image_prompt_streaming]
+
+echo "[START text_gen_multimodal_audio]"
+# [START text_gen_multimodal_audio]
+# Use the File API to upload audio data for the API request.
+MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}")
+NUM_BYTES=$(wc -c < "${AUDIO_PATH}")
+DISPLAY_NAME=AUDIO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END text_gen_multimodal_audio]
+
+echo "[START text_gen_multimodal_video_prompt]"
+# [START text_gen_multimodal_video_prompt]
+# Use the File API to upload video data for the API request.
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+name=$(jq ".file.name" file_info.json)
+echo name=$name
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END text_gen_multimodal_video_prompt]
+
+echo "[START text_gen_multimodal_video_prompt_streaming]"
+# [START text_gen_multimodal_video_prompt_streaming]
+# Use the File API to upload video data for the API request.
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO_PATH
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+# [END text_gen_multimodal_video_prompt_streaming]
\ No newline at end of file
From 99e5a11b552d0bc7810ceec345e609f493e1ab51 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Tue, 23 Jul 2024 10:58:32 -0700
Subject: [PATCH 20/90] Added curl examples for files (#480)
* Added curl examples for files
* Update files.sh
* update files.sh
---
samples/rest/files.sh | 251 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 251 insertions(+)
create mode 100644 samples/rest/files.sh
diff --git a/samples/rest/files.sh b/samples/rest/files.sh
new file mode 100644
index 000000000..ae44b7467
--- /dev/null
+++ b/samples/rest/files.sh
@@ -0,0 +1,251 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+TEXT_PATH=${MEDIA_DIR}/poem.txt
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+IMG_PATH_2=${MEDIA_DIR}/Cajun_instruments.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+echo "[START files_create_text]"
+# [START files_create_text]
+MIME_TYPE=$(file -b --mime-type "${TEXT_PATH}")
+NUM_BYTES=$(wc -c < "${TEXT_PATH}")
+DISPLAY_NAME=TEXT
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${TEXT_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you add a few more lines to this poem?"},
+ {"file_data":{"mime_type": "text/plain", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+
+echo "[START files_get]"
+# [START files_get]
+name=$(jq ".file.name" file_info.json)
+# Get the file of interest to check state
+curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+# Print some information about the file you got
+name=$(jq ".file.name" file_info.json)
+echo name=$name
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+# [END files_get]
+
+echo "[START files_delete]"
+# [START files_delete]
+curl --request "DELETE" https://generativelanguage.googleapis.com/v1beta/files/$name?key=$GOOGLE_API_KEY
+# [END files_delete]
+
+# [END files_create_text]
+
+echo "[START files_create_image]"
+# [START files_create_image]
+MIME_TYPE=$(file -b --mime-type "${IMG_PATH_2}")
+NUM_BYTES=$(wc -c < "${IMG_PATH_2}")
+DISPLAY_NAME=TEXT
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${IMG_PATH_2}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you tell me about the instruments in this photo?"},
+ {"file_data":
+ {"mime_type": "image/jpeg",
+ "file_uri": '$file_uri'}
+ }]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_image]
+
+echo "[START files_create_audio]"
+# [START files_create_audio]
+MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}")
+NUM_BYTES=$(wc -c < "${AUDIO_PATH}")
+DISPLAY_NAME=AUDIO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload url is in the response headers dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this audio clip"},
+ {"file_data":{"mime_type": "audio/mp3", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_audio]
+
+echo "[START files_create_video]"
+# [START files_create_video]
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO_PATH
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+# Ensure the state of the video is 'ACTIVE'
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this video clip"},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_video]
+
+echo "[START files_list]"
+# [START files_list]
+echo "My files: "
+
+curl "https://generativelanguage.googleapis.com/v1beta/files?key=$GOOGLE_API_KEY"
+# [END files_list]
\ No newline at end of file
From 353dc4fe860ec5a2f401c290e4fb5c580bcb4ed2 Mon Sep 17 00:00:00 2001
From: Shilpa Kancharla
Date: Tue, 23 Jul 2024 10:58:52 -0700
Subject: [PATCH 21/90] Add other functions to count_tokens (#482)
* Add other functions to count_tokens
* Tested count_tokens
---
samples/rest/count_tokens.sh | 143 ++++++++++++++++++++++++++++++++++-
1 file changed, 142 insertions(+), 1 deletion(-)
diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh
index 867e787b8..5d4f08d14 100644
--- a/samples/rest/count_tokens.sh
+++ b/samples/rest/count_tokens.sh
@@ -1,5 +1,21 @@
set -eu
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+TEXT_PATH=${MEDIA_DIR}/poem.txt
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
echo "[START tokens_text_only]"
# [START tokens_text_only]
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \
@@ -29,4 +45,129 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:co
},
],
}'
-# [END tokens_chat]
\ No newline at end of file
+# [END tokens_chat]
+
+echo "[START tokens_multimodal_image_inline]"
+# [START tokens_multimodal_image_inline]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }]
+ }' 2> /dev/null
+# [END tokens_multimodal_image_inline]
+
+echo "[START tokens_multimodal_image_file_api]"
+# [START tokens_multimodal_image_file_api]
+MIME_TYPE=$(file -b --mime-type "${IMG_PATH}")
+NUM_BYTES=$(wc -c < "${IMG_PATH}")
+DISPLAY_NAME=TEXT
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${IMG_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you tell me about the instruments in this photo?"},
+ {"file_data":
+ {"mime_type": "image/jpeg",
+ "file_uri": '$file_uri'}
+ }]
+ }]
+ }'
+# [END tokens_multimodal_image_file_api]
+
+echo "# [START tokens_multimodal_video_audio_file_api]"
+# [START tokens_multimodal_video_audio_file_api]
+
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO_PATH
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+name=$(jq ".file.name" file_info.json)
+echo name=$name
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this video clip"},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }'
+# [END tokens_multimodal_video_audio_file_api]
\ No newline at end of file
From f8b049f813ac0a926aa480a2282371ee67192739 Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Tue, 23 Jul 2024 13:42:07 -0700
Subject: [PATCH 22/90] Add pdf samples (#484)
* Add pdf samples
Change-Id: I835c4805081af3aa6ce26a8871a62b5c435f18bf
* Fix streaming video
Change-Id: Iec0000da192231a7a5f97faabaeae9d3ebe64475
* format
Change-Id: I51705e0f3b96d825952a3183bc55cbed5cb158c0
---
samples/count_tokens.py | 11 ++++++++
samples/files.py | 8 ++++++
samples/text_generation.py | 50 ++++++++++++++++++++++++++++++++++---
third_party/LICENSE.txt | 3 +++
third_party/test.pdf | Bin 0 -> 821662 bytes
5 files changed, 68 insertions(+), 4 deletions(-)
create mode 100644 third_party/test.pdf
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index beae3b288..74a9e4881 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -167,6 +167,17 @@ def test_tokens_multimodal_video_audio_file_api(self):
# [END tokens_multimodal_video_audio_file_api]
+ def test_tokens_multimodal_pdf_file_api(self):
+ # [START tokens_multimodal_pdf_file_api]
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ token_count = model.count_tokens(["Give me a summary of this document.", sample_pdf])
+ print(f"{token_count=}")
+
+ response = model.generate_content(["Give me a summary of this document.", sample_pdf])
+ print(response.usage_metadata)
+ # [END tokens_multimodal_pdf_file_api]
+
def test_tokens_cached_content(self):
# [START tokens_cached_content]
import time
diff --git a/samples/files.py b/samples/files.py
index f5cbfdc0a..cbed68a1e 100644
--- a/samples/files.py
+++ b/samples/files.py
@@ -75,6 +75,14 @@ def test_files_create_video(self):
print(f"{result.text=}")
# [END files_create_video]
+ def test_files_create_pdf(self):
+ # [START files_create_pdf]
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ response = model.generate_content(["Give me a summary of this pdf file.", sample_pdf])
+ print(response.text)
+ # [END files_create_pdf]
+
def test_files_list(self):
# [START files_list]
print("My files:")
diff --git a/samples/text_generation.py b/samples/text_generation.py
index c4d6adccb..aad0916f7 100644
--- a/samples/text_generation.py
+++ b/samples/text_generation.py
@@ -96,6 +96,17 @@ def test_text_gen_multimodal_audio(self):
print(response.text)
# [END text_gen_multimodal_audio]
+ def test_text_gen_multimodal_audio_streaming(self):
+ # [START text_gen_multimodal_audio_streaming]
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_audio = genai.upload_file(media / "sample.mp3")
+ response = model.generate_content(["Give me a summary of this audio file.", sample_audio])
+
+ for chunk in response:
+ print(chunk.text)
+ print("_" * 80)
+ # [END text_gen_multimodal_audio_streaming]
+
def test_text_gen_multimodal_video_prompt(self):
# [START text_gen_multimodal_video_prompt]
import time
@@ -111,20 +122,51 @@ def test_text_gen_multimodal_video_prompt(self):
myfile = genai.get_file(myfile.name)
model = genai.GenerativeModel("gemini-1.5-flash")
- result = model.generate_content([myfile, "Describe this video clip"])
- print(f"{result.text=}")
+ response = model.generate_content([myfile, "Describe this video clip"])
+ print(f"{response.text=}")
# [END text_gen_multimodal_video_prompt]
def test_text_gen_multimodal_video_prompt_streaming(self):
# [START text_gen_multimodal_video_prompt_streaming]
+ import time
+
+ # Video clip (CC BY 3.0) from https://peach.blender.org/download/
+ myfile = genai.upload_file(media / "Big_Buck_Bunny.mp4")
+ print(f"{myfile=}")
+
+ # Videos need to be processed before you can use them.
+ while myfile.state.name == "PROCESSING":
+ print("processing video...")
+ time.sleep(5)
+ myfile = genai.get_file(myfile.name)
+
model = genai.GenerativeModel("gemini-1.5-flash")
- video = genai.upload_file(media / "Big_Buck_Bunny.mp4")
- response = model.generate_content(["Describe this video clip.", video], stream=True)
+
+ response = model.generate_content([myfile, "Describe this video clip"])
for chunk in response:
print(chunk.text)
print("_" * 80)
# [END text_gen_multimodal_video_prompt_streaming]
+ def test_text_gen_multimodal_pdf(self):
+ # [START text_gen_multimodal_pdf]
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ response = model.generate_content(["Give me a summary of this document:", sample_pdf])
+ print(f"{response.text=}")
+ # [END text_gen_multimodal_pdf]
+
+ def test_text_gen_multimodal_pdf_streaming(self):
+ # [START text_gen_multimodal_pdf_streaming]
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ response = model.generate_content(["Give me a summary of this document:", sample_pdf])
+
+ for chunk in response:
+ print(chunk.text)
+ print("_" * 80)
+ # [END text_gen_multimodal_pdf_streaming]
+
if __name__ == "__main__":
absltest.main()
diff --git a/third_party/LICENSE.txt b/third_party/LICENSE.txt
index bd47e3837..6d50e78f4 100644
--- a/third_party/LICENSE.txt
+++ b/third_party/LICENSE.txt
@@ -8,3 +8,6 @@
* This is the first paragraph from Shakespeare's "spring", public domain.
* Cajun_instruments.jpg
* This image is from Wikimedia Commons, a public domain (https://commons.wikimedia.org/wiki/Category:Musical_instruments#/media/File:Cajun_instruments.jpg).
+* test.pdf
+ * This is the first 2 pages of https://arxiv.org/abs/2403.05530 by Google Gemini Team.
+ * License: CC-BY 4.0
\ No newline at end of file
diff --git a/third_party/test.pdf b/third_party/test.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..25de0918fe997f2f90b6aa5dbb5ee7a6d0a2e098
GIT binary patch
literal 821662
zcmbSzcQ}>*|39ZA9A)pqF(Z3rBu=)>Y_fOAmOYLlTSm4>ArT^b9eajsg=9!MPadGoQMMWXbE>7mgk09>orK!ty
z^fHgz>Ni=0(Fs)$MF~E}0=Lmg*M+NtYL^i`RwXn%Vg3@I3Ed>`*{QxW=aPnX^SgAO
z$BeO`RpO+IyC0S9XjxWy)F_0->@(>)kj*i^CeuB+<@w0JtZDG|am!_kO+on$hTM42
zqFciKd(O#hwv}soZycH0HyM=r=Vl4pIsKF}8LEzG4_2)XhAhe_>^U=abQwSMg^-;#
zI$yTV_Nxo-=Sf}x9zRWYTXy?;?7CP{o%nYB`%c9~rLUV7t1!oFmg7@OeP5#ElZ}Dz
z=|{px9Xv|0BEo^E#(t%_crpzb&-0F7_@`D(SUySo=?bfpk6Np|gYJ+H(e+%ut%oTXm4XL*<9b86;r&7cW4E@T(pHp1tB9d=
zO+=X4I%}$<2LbzrRh7;!1B@+iS2wp`Nu~o?LQc<>hB0Rv8HDr2=&(%{^jaTcQ#AiN
z;nrbr)YX9Lws!SjN){%4$wc;f?%VZ}B(yJ$>2+@xYHo6bv0b{RTL#j2NZ`@I($CXF
z7KW~0H;F?qT{ZJ**qvFCX~5IjPmAWN%otsA-Lou6A8FO(NJ^81{m*1N`?i#9427Pl
z)ukU5jz9Cq7#>Uy=0zl{T_uR!=u8}BU&+qMePPc3Rz{WkyPenJW(z}ye^Wz?L_Mns
z4cbvbosBSUpWPFIugllw*YFX~W1r4F-o)*F~`*C8YY5mn?w{8|=X74SU6@l%~xqRDBH`%#VUMOYT6y~fRpW5q_ktlY5hhuLsvTa>WmcH>i
z$+`Mjgc@9JRkMw##Ya>z#v|1zF)iz7*kPt_CQd)PqA%A2x-zoGF2=?sMQkQ|+tIZ;
zm$EEw#)GS;2$={LZ2jlWG-mI&s&f_;FDJ-yGIJm0+tYM^9QzVkaE0{QxEsrjilbGu
z*fCpn2iJ1n(T2{e&tE>auD)+CyQ?DbUba#_frZj_|DjjutAtBsyy~`ZqP5_{>-#Qn
z;=Kn_kLg&Q6HPvJ&xo*Gc6rF3)W@jEEhWRs!)4NK55p8UD~qvS`IPaX{MGCZ4@
z7E=_kQTyC=4^lhll@gVE$+~G(ExgzBqf0H{zz19i2G>(Q&VXn
z7R`TF(2bHlw3QT5DB~B3{vuq{-%O%r>$WTYj77
z{Z;%he?RqL!`HYFx|re8gjZI65jsj+BO=N&El;ts6gH#Wos|^k;d3Y6Uin(5BMzg_
zg^6z>7+*vz?VutbnG>-Z@OSM4?_l|yn+qPB9jeR=tVS1iv?9b00qyte8te~MRam)&|a
zonK7-Rp?V3>?vl1^libTfjLPb+0zas^7n&Rut%{9rEuaIag#p(Fn!$@Y-7XZpF0Bh
z3YW3C6Nf$*?xssp75RL#)eKut$QR|;61^fR8RIvo<_ypm3*VZ#dhi}U(l{PS!Ff~y
z+Qr-5iBk+v#JkEWkr?D&b!$as;CXlb)!IO@Rs5P__ZN;#RJt?54#(LmLU;H=NHgIT
z;x*LsHY2IbW!o@e_zz~E1-B2w^G!PrS57E4J45@FBQuvka7T9p6%o!vDy>39t~TjI
zjYvXvxfv6oDk>aV5NIE_|X$x){D2AOxcf(mXgVkBL
zC4d2x74)TlXL|?#`IR>CW3#spM)h#oNB5=)H>T+MT-JiVig3So>7u?$v%Srm$(dW}
zksP&;5E6BW*SJLAk8k1^L73FQ$$A%I0F|G42F9IEO9&BaoTk%XvRsgRGUrn=bYRFI
zM)~bq@a{zTh!JQ92RF=b^?h(WgW0Gtsrc!{JuHhDwCnZ$pF6N7?q-x|8Y5`5xGGkS
zvCM;(wO$Q{(9cj$l%dz$p`!+kQrvski=c$H9^V#gqf7%V8e!d@iILnW16ci#Rg^`~*&;eO?$wkuKnN(Il-fH&v|&L7q(+7*IK7jfd)00kY_oH`?1|mv6-6hr
zSS|R-a4(&+ug%mZy86V^FMkylDIzVzkI@nFYQeDpoJkXAjax`AyEo(9{0e$oLsCbh
z(`o1MNQjx^1kOZ+2uU
z=?!Myuf57vbiLO+ZJ(IAGWuO&sIb2#zth$1Cfql1JO4*m6GYK6DSsiW*V$S+VSO<6
z7o@OwY=~2orto_yLk|N!_P}+_=bL7sVCwtWuO*GQnWukTqgaAJ4bR&|dl0kL7h}#G
zYLTZCWMPz7_)+X<#u8zbpjbkhTolnG*)@0BDV_xRZO#|a?`dE1W4mH{#H%tsEZJVa
zf+_NJ@`hQE+ikNQ-p4M5>SgaYCRHgiGz};UN1sF!rIpp?$%%#~IpF6@efcm(0Bes$
zn>D%*U$WIzsnlg=7nQ~#`G6m*rh7#-o(PBaHjBJ?-qJg!2hI$aBpws3sd+6V-8jO%
zyc19LlsdNc;b8E@4||T4vhP~7-{@MO72A-d<$V&V{Rn0qWxHow;BEF=4?nA|Df^`3
z(9y0NuXK-MoF=j~mwNsuD4e~uF*U{T?snWh9jA13waQzf{Ci!C2-AY_1>XC*D{*TQ
zw|uNWyuRbgmQK0ixbfX*Pu|3OSiA5^ee3HZvw;VPc^Z#2&|KH@&6IL|y(&d>`1q^z
zZ?dkN^iUU128AtE(aEp>jt8ZjEV#%b#`*llrHf<9EChzrP
z`a6bStSdWhdgEW$8!Gx4gi9S_S#%G)@_92p8TC3RL9IVOCr!OTe=!R&KG
z__u9G!=(C(dI-!<283nSeljUDy}--=K4bopWCo&Lr$bar6ySLsNnZ%6MLS
z@3!Y5g~K_8?v)$Q2t2zp(_9kGZ!|5wAd}GkE=1t6CR3v(XaS0)>%qcP4$RUSmFeTk
z3d_J)4XG5cu1PvsQvOCgsaHeDGuYw$ike+jv)~JP3Nh`*jmW@HW3L4qw>IfAXw}h+
zd}wdH5_oB>7`0p;u_8MePMSV*6>8KbpjC4Di;+jwk{sspW91O-p5VK7c96`zxZ)n;
zW9&A)=7YH92?}&y!QSyh_$F~B`4m;qn5SF7or2ZpKSF48$+$8`^(AiyC8{_&NRd>c
zQ;|i|rKWIBElQ6wN4hqYRi&GJfe=O(e0;n}SxkuD)cy3pktZv}cPUG%F_e^{NJyR1
zkGtnDZ^m6YgmC+WtR-@01b@A95&xFQ(c(uEr>CNi+cu*c=CRZtpVeLeT4v@>
z23xn6$^(D((eF9n5s+|#eSaS745wqkVp{xSLwtsvI#FvWCJTZ|d*CFg(NyuMsqnb;
zw&bh$$X89Q#rqy!^ugCO5IFc1zRxDMHZe+IT`w5hMFixpj!!&;JfB~^3z@yk{px=D
zOnvwptyIE=V8(~>&;9IjKfe#FsD%?nGGpg)Xb+fjqAipsNhOp%ZDPw-7r3e+6dY&Y
zPIg-@Kiq2cRVQuX#~_}h9^S^|*F*1r+<&`7taE^sVo2(?RJw3whWJ>?wWlRQ;4U;)
z401^yamL&NDY
zYw6HosB^OVesQ^|Opq&dqhoL@W|utq^61Z%UrG&QS8rH
zdw(YH!i7?(r>zVLXVX3n&ekX~DZI0`8&+RE9m|#+XO{eEPY|Sot99gE-=aTF8~pCY
z$n`4`9P}lbmyBsuHlSt6!$x|7eJLjSVtF=?-Va;rtPw@&fHD2LP4ftd+rrk^J;*t+G}X&HTSC{3{{jcZ{1>H
zbN1gMZ%E={PPOK2@(@pnliuEm%~f~XV4sbS^^zEdRsh(>NjJA?bHAC$IbIJ
z^Y|QLnHb8~Hg=upNpCl<_7nFc2eWQ=QKM(}w+e1PRT`~1OzniS3L47acf5|1o^oJL
zt%Xg}!p&w>nwL=W<&ybx4Q9I3#PtY2i;aa%I;Dy*zADF$4K(If1b%imv^!M|{Muy^
zgr|2|C!1SuC)X~MS}iNP*@$mAbqCFLF>oyGVZLsra0<$c6fs_p(%JUzn1~@xc&IM;
z{fbCLWR=o5Mnb~aUcmG&sPLifw>~C@Q>fg`@y9RwA=IA|-DpH83k()RJ96bETDM;n
z9K#VYB?%2IURXk*4{EqrU*$@9n?zEty;I7;4~k8B6?X70Do{VRhl1|Kly1&on)pDq
zujm(ut0;30ciN44gW`jyB@!o#aWAB=LY>>(CmRvMnQJu@VdFX1lNiDkY4H5&LE4E$`==(~
z;RtpyN6fpO>oAAY*bh7w0W~CZT;E+2oSXe6;Zpkz`#KleXEA@Kly_P7keT%wckWv_cZc)>5TJ!+U8_+=cKSvYjLJ(QA-T-HCFtAJdB-dhh*yacXGCK4aVU
zhy5PyWij)q`9#MpOEu2hT5GngW${E$Z{Ft*)f^8ytqP9kATnCenXDEczL(IL{?i=1
zLiuU9a@;NeN9j5b{-sR{9{C`;E2A&TKFM8idRDcV&cZ(RUFbgKM$O6fEUuuioAjnS
z#x&9&XnppabV3@2iJHBYj``Rozf}pPGDGPDMc#$f@VkvF4C;KPo#|e|XrFBm)~i~j
zu9lRXRT%}pdUkjxT7rPh+X^q?%CFsZY5LhB$8V`OnYe@Jh$XD4>YfY0zTNFq))$^c
z;K&Y;&+Uzaes(dY-mgRVF#H)MV7Bz6c9m52^g->h7REU5_XH5RuS2&)
z3xn6ET$ah|7@O>e>=iLQV!01>>~(IW_u{FQ8jY4SCJrgmVUPbTj1pk-4#?J5r)D>K
zyCG1L#Ar&6siUwsv|BKra^3O)ULbwh!WDe&(;mO2BUHHD7EJLbgKT2X
zdLJ>(F*7!Z1#aV9hWt*+R=sA@}8i7sziyR{&BNUXTug&g4RV|p`
zb64bebVu+^uxwf{>u~HW9&|AdVXa72&tV!o=bY$>58-Uae_W5P#h8lyg5dG)0B=o|
zgX;a=6UO7=5B8VSuYc^#(mJLk@KVZaRFd0z#c_Op{M)TX&mZjb+*i!6PfgQHN0~6|
zwSJ5!vu$d<+xp!yf3-E0K!_ckW+gf2@FO^r_7`cuN}m)3>wbNWfWRp3eVyS>(=EKO
zHn*R1q>rWr2q%b%%`>PaW%Ttt91wjcp;0}juv?l-#&%O};>M@XY~y5B2{^cAM?@6w
zT&d^xO?5J!K6%HyqV|i{eBv|SFURJDAv}o+!gm(lhnU0EPr8?%eKg*){P8S$n`OXk
zd~v5sER#l64_Ah;|FI|W=DX)^^l-%NUP%BS|K%<+@52ODe!)eG?)VgT;<*$Ywyy*7
zxr~;d0jK}T9FaY`^6c5@Hg%xcOR&~hVa0UNE`DT*b05#YS^cUZm2{^CMc|P
z9DVVTe#1Z@z=O!S$x{d0r4CzvXjn^66CKxOLQB`t`BPSLamKD{#I;9jHHhX4E4(&X
zP`7h8(i6Pm%l?z7G;w6OsW)exL~JEzF$~V|?63>p*Bf)$^zAk5HH(&zaJCoDi|ori
zwd|re^JalF`W88R`DtVdnh3j~65=TuGN+!{cUdaa
zk2)mv2jbV7*BA#HHRpLe-(N}ig`mcaRgpLRnDEJ3e^X1r$n5iti680XBQdjezvo7oln3}(70Rh_
z-9GdJ`Wk^u;C%L5fe-U5_>od|%vvp3W|xS;}Pk2%#nc%VXmZuy|xNXY@t)je)O
zxqnNc?jZo-{w<-phaeQ#y7;rAwz7$}x#{_$a*w#7e}B0U4EgAfVCSm*^AV?-tBK2*
za?eDAa%w#^1NKIE1-ZGPoU-N*Ev;Oj{D96l}&(&K9d{DDgMyKS;gE*!v2wiy&ckGf`7MO1bBq_fc4LJXCVLBok87@`8nS8fw$Hv
z>pQPitS+uB(?@hlD>|83`D?Ucp5rAi`RE@@N~Kk0`q$)x4O%E1$nJ-{z(YhG$vr(;
z$2-A;KY;rk7oMi<)qm9%fun)2p#J~kWdjr<6MdK+PsRxS|M(gQdn*kxTyOJ=@r?!;
zABKkhzg>zkw(!Wk7`inregC%)&q!qxt_f~EsGV2GQQ-aG1;=`dr7k9QJxR>$ZX)=9
z6U0K*Mue~=Y@xQ7KAC}<{TcMrIvfwPb799LP
zS{zUntPr7U`2-{7m%hNt{|9xL5^SwzWjcDc3=96h8y(+@RQrgl$o=qBS3B(g!|WRv
z&B_aetI+55rn3Jl+5y=S5s2?US^b6yD|9qC=>PWxp3|!=3vxV6$8g%gFQL2mo+}q|
zdi3oyukVyeAN)o9zuT0VY0Z1|g{3?W@z=0ssf+hv8X0{AbB5{~QckoN%m3S5KApAln5X`#1{K@wJ
z;9T3_Y$0cP`U%kXWA+e&i$Ks$Iu+-%1K|?;@4>~t0Ji%|(=JsN=9Uw41;wTApewN~
zzsC;wKR|5*BU{8wUX2N~UmXiZVOt#^U?Wh2j{d*10ZaAoxT_i3&5mDAm4to42D})Q
zs>)IA0+zWj)xX_wktQA*u}>u7kgtk$q8O1Y0Wivfu5#7gfBW)6bBhFAV)geLT*02V
zuJ59R^&)>@4Ki&rUCIBxMg@uX0vCH)6=Zyc>*dALFe42>rG-@+TM_?;iNA2m=a}-e
zcrlFw$v+9r^g{v1iH^To=jqP|LliZmu=#fZ7i2BN)Z0uyqVYi9%2|1dI9i7Fx>xu2EP$;>4dh
zia-T_@!O|yl+XdY3!sBckB{#E5FJIZnNwvLFM-@Z%3cWzDC$U*1>ix%&h#FI>wgpM
zXlBKVd4!;kU@9>50+0v@01gA=7fS^HW=|+=>f7mzSo$g1b2!U6ZEZc+!j+qH_*Mkk
zM;P*l7up@bak%Gkz?gj(1;Qxo0?u&*zQEhj+bb-U$#$+hR-BL5A?8;=)*#k;izTYR
zTo`!*`cbZL=KI&oYs9hU4cZ?lnIB_5e+Ll&vR+W?c!IbA$1p7UcoCF~f|1^=WA)`<
zy_uYB&*f-uU~1h~{oI3%bl}QucPYaC^AW|I#*`P~C{Y`!a!!u-wEi%5xU!fox4TNl
z@FbKS1q%Z>S9}`oU>FtX<}kC%xq~?{zU=ySw|70CCNnj|EeTG+
z$3wn31$Ef76j|?;C<-|p6tlS}4weQp`L6e0cnCrj2{h}I1Ix24`x5^qR;tA
z-9mxNJ*+FfOak?_U`kp7D>5#>h%2PF$^~3bMPm(LNxFe7U269Z%_xrziH0eX`?l;s60wD@%}N;rej!pD2p7RGQs6Oh2Q;G4M0
z^&81LmawWJPXEZua%Yfzi9tNcf5IcOkUBMXb3wi%#6JW9F4WPzi16Ba02IRP(Y^ra
zph8dMNW)bR;7KMLwP
zK>;*VFDl!EVAcLpVasH>cl@Sh`Ku%$+0w`)W<}iRv1)Zi&r$n*Y}Okogi6LVPJA~C
zppLK|?j@xml^l$<=na>PAxDpnkhy|lA7ZeH=u^Kmsk=c}u)?vtCpL{|WRgQG1;VEX
zB)MyOagfZw$`Lt!L;9F*O%0mno=Q0!Iojj_?{f
z9=s{QA!(_9&Bx3k5n!-(j7|=VtY+{v%%aV
zIxN}bnL3?j^BGzNv96y~DjjX&=dYyu-Vn(RM}R@DhF=&iqMEiMfUmswg2#0J{`s(J
zfp~@YoX~1}>>v~l161i;>)73PXQaxC|$sk>Z>gH#AY!{`*xb;We#R#T|5
zLvS1^{u(Kk*Bu@L6nxSH0f74Ri~;g>73i+KyhS0MQNzH$A3)dNxwD3dAA&IY=p_}tgzU_{(1VM$LHsW
zslwdD$f$lS5I2aMQ^5j7>lqIKg-y7OBQwt^(}J73pk!vyCz*^N;J45oG`tk?duy5XcEL|aIgOPBrer&c!PEIC!)Ebk!fKUGI>F=a?
zRtrPrGde9d*l6aZGVc2)sL~I5Xp>Y)^f=S&ovBBbSn!Ay5Q$EFchEVNm}E86%hlGN
zSnC~m?_>B|R6JDzN%@l;3sQqy*bKg=9fQ2sTaUz6P=tqSA!+F`TF6Z4WOObsCTvzl
z?^)t}rpj!6z)x(`6y|C)Naj=#j>KH;w=!}bRJ2y
zKS2X^LC&acVzV6zXFC2B9-avb6pbY#VcLO!Y9gz&D)A?TtldV=Obj*Ra5&KtUJmuJ
zDZntt^V7UCU!`i?!rEzRj{AU^358`NNdOr1p5tMFSmY2hAN`IRA{_D+AP%0Tud~b4
zO_fE{%DhW01dF81TXY9fzw8EVw@gIb?h5U$=ZuCt`Gog~LxaolyK1(9wc~?iMI8(#l*Tzj8IaG%
zZXD^gi8yD#2k6;rK~U;rBUY8=OO=mS)g_GdGKN_f6CT%?H$mpMaN$>3&fT4h*1(PBut|Xh
zvufJK2be%ui#TUy!+|W;)5%_xyG3!Zmfl%;cKlDB5OwcgM?CT#r2OM;@f!?R0LhDr
z5hpYY<89M)8wOHHY4e*6Huqk8%*()l7)Yen%GjriUp8k4-3fU=*
zj77|ikvejJ|8lg#G{^6mL5HI=Sm#d9MKF3glX^J%hAM}S$&%laecG%}SEsDanpLU(
zkN(ksvNBb5*pQx(&L2Sssx;K)IV=W+)zn!bt#)28!SnzkPL3MWikvE2ftz9MrF^aw
z<+G|UpCZGCCh6#jxso5W8d5?{L$xaZxZ@t?HAAvANETVZ+65V)cl#6D-}u?f`I}=%
z8i3Ao+!+;Zos5;7^%q(Zlg=1F>a2Y`oR@G*65@|={1$`H%Y$OD%cNQ{_~pWo&>G8&
zS&9hDHjFp%)tudeAdt7DkrDmhYE{
z(ZS6aE_Dcoj$+VsA;I)Ts8C>2Igr?EUKY1024xxY)UT-)4RH^NWRWIt0a}3-0W|J8
z%oGj`-GTmyJJJY8R{|;F{LmWifES}^4GgqpLgcASNJqGRR;U(NASOuXg#UUF8pszk
zB=+>MLxln^k8Z$|z~)1iXL_=+%ip7aw$MCCc$H=TeFX8=ZG_H;jbMgdU2OF+AogWt7yh&9H#=Ck0-~B92eC6CZC=$>r
zy0C>iQqMkgI`;Xe3Wtfuh{64(Xp#cVQG+WJK%nht7t@!*!HEVL
zi}EwqA3VzH!N$?Q-^73iELBwO-}6=G;AZgkz3$NH6=i=rc>bMD0ed%FJJgyT*$++Q
zNCKt4whU|mMj2)a^<%0Wh`}Xsd0172c^}ij0JhdF-Rn7
z>%bNw=-o)1dOhZQUIOQbm_RxY+8IQoP@J<57y=c~S0Q^;QsQ$1jbpRzHNG+bBM&m(
zS4U-#6t)F(cLjh*3~}9(hT>N_a!y?NMs`-Ebcpp*x+d!CbbHi7Bk$+Rq)Xg4
zlPT=kIH>h1R12_8M~eqV^-BT*TpmlG3Jz;MNk;S+ng>eB4Z
z492oA%BSgGNFrAL91bEtRZpG
zN9QovbY38yhtP}%8FTbj@3|+4$Ab}fGlqNY6}%Sj&-S*i2{%cOpZDrT*!f4)-`&F1
z$~MYYM6~ZBOlbMM^UReT+fVKW7Q_TxC=&|hYhKW5;(7gr>^NY*5wgI1kOT*3U5w=^{6qfb)bH|*f2Wmz_b48MW4A?6LLME@eD>*ISCDDY@S_Oy1g
z7{DWhq|ql%F2%)wu*Lj1-I%FBBjLXtbWTt(7
zNIUGcLnsfEWkFxPlVo6c?~q49__Mix*V}Wg_YqVajoh9y@s~UN-rY}si6JSo3f6^N
zI7z>M5l!*Qfl#;sg_a}JpC*Xqn}&gs_0Z=|R}SNm%8HGCq*eco@diSXk|l@nc?UzloH^eTDh!y`I-p{dnNjkHmCRJ|eT)UycD~9*$Qa2MkghI6+)u<3ZLvg(u-QoCwUbd
z%@Ys19~M;qxbJNT42+CcES7LlJp3`>;pGpw34uorB-*n$3R+EsEv}@(P`MBiTBMfF
z1@sSqA#w(`Scp?QGB}^t-8Tg-ayE+^hj&b{9WscVjoR?ZDh}I3PO4Z3zh9p>^Q-E=
z?~MWfMe;$sWb2~5_lLa(rQg9)%BW7c
zF@dU7h0DSb-j&AtttB)swHQM3>MZW$vpwIia1_e@cGLC-SC048g7jP#*tak;>mW<#
zuREcG4FJD8y4Q>bJc3G3ZB#eMg>fJAbfv>O31yTz#V;x-AX?+^Q}8<$kV0C4wJU_4
zuHw>$#KD2*mPNVdexToWrr6#v?54Q7Q+x;
zk~73gc?be?;G`=`g9(svMs78%Y-imD<1;h#?fo)A6p35KN?p|+&u3wJ!1Cq7*|^Bs
z58hCU6$(*dG~^en`oS}c?AS=j*1VqV>5iGw3TFO%bx-jRZ)Yjd2m`=G
zVNoa2Y>ePK-%pDIb>NX^+GEXImA#F%XR^($to;&{mzvQLrPEjV%_5Fs^8Q%Xn-laDb`ZAdkj2KMu|CJo7{RqlUdm>f9h%;ft~`)4iAl-GJ!Fb%N4x=PY?bca{iifkNNGxj
zU4Z?cY2*+&Ql03T?#;<|Ve852!Hv(qcVqz0fC=(^n^wL0FP?C{y9oQ74$YTdI)-Bl
zcIhjC>5$tO-$0F*M+-nQagXlgq6~EaY4Vu>^ABmJ`Vzokn$Vwz9U~!TA@Nsts?16+
zvsFi;6i;hVU@Y-o?hEGCm%54wGnt{A#a{>qIFYgfU8JymTiKM_OVu*QOw^(b`QsD7
zacCUek$V)u#EIKD-Lg%U3;koXPvqten5r(i-$f$wqvty7JEF`h$;uhiD^$HA--P>r
z&m_g*>*ca}kf1K^Ip83GT99PiMcsYocJ4qT-YCKAMdHpFq-iJ?If8AI6jBEX=PZy7
zER5i#{tIMa6dbMmu7Nw-P?OS|1Z&bsc%ZhE>%6Lhhl14VhLk++RZg$a@uCfrpiwcwCjhu
zW$zYahm$FC(0i@|^ty-;Rm<)dEfN$~G{|H!5gR=LkpzoB-u;w-WMB
z5}aBW@spVqBp$8|qQZIa+?At5aS=SZviy5;2I3gh3Ro2HV7;;B&6WF&a=?^TKqFE1
zBq)~Z{z)k>RwHsp;4F0fzWigjZj{W)bybmhTMlN4~RUbTE_
z_V+eWGlppGwJO||pyZ`ECjpSaKeHgP_W&HK7>>0Dc!HJtFT2I0hq{#`BCJ8_!}?}L
z$+Re)g$Y9}^&fLecR%(q1F?K_rceSaL(k}FZZ?#WHd
zDhV^QS!(9GpM`a9Gi%V$H*2A+{Ib``LH`9{u)1l1tRyrX&4CXy0>OU(p|%K2RGv{%
zO&P0DL;CR|g*mYF?cH^ZHcpV#V`DFBAAV$CvSm$p9_rz@;XCExtI=-QGzHF*`{>Vd
z{sURkfUApSw$%cJ{a^X-j>P=hzM7I57YRGOtU{hUBTe`>Bm(_ppz)c_?_!+_%9*&O6WAcwHvWPv84d
z>N<+HkkrtDrU@5j0YQ3+6m-6;BVI?hpC|h*tm`s9g_?Te=*9a!Ai8w%ER!ucr-Wqx
z!R&tQ*JTR7yB&mN#>#_V*MY8xG@9<>i?h3VWLtJ43kuk&?}o}QPIsMeC96ElGN+cQ
zgU;k4eZbw}BY4kjaDxr8EcnQ@otNt#KUz_^d#)^Eu)#O*kBup$PP8
zkm^^to8fzn1ZM{v(BPs#7Wt~YzW@}`x#lPV^~^GofEx;_rc?sUz%k*OhDM?;&1%5x
z=kS+MNHOi$1x&60qa4zLqkLMPtN;wn$@0Ql%tDNT^MNlu{n`O8`K^pv!${`ejR=Z|
z{Bz^wsgKycaI}XF@gTAL1UB6zzHQlyeN-dXjJ5BZBhljF5||b1-M`%$%WxJJ*NR=6JsI22CID;XVaTb48J!5EK
zQaC$Wauyw))2*$+xqjx>w>D+lq%)gKCAz3&`1sJvoFs>)t{4~GHTdiNQ{QVj_i`Vn
zV*^c=K-Yf~BnAlP6!EeE1RY4RPU1I=tDJ8YD6p*+5xKlf9!(#<(BKJ}Sk(ImgOE*;
zawm2w8TWDMATDQl2Kc;R0-$>Q%NM6FW=Fsx0Eo+^lqu%qz^WI)%pVpHl#eB|NyywI
zh-LeYCRZo
z0$^PQvNR=);(dS5Oafic0cMY(|0hG*ZlNa9D9i)U!km14@vE#G`#42&-+#9Pyx74t
zy(cCg-#STFN`-L$LqKFR0&KMl0QABXvD(G%6#K|E(F)%6?aVmM=o)a+u2
zZU4|Y4&LBAwTZg&*Zk*J#3ukIa_!>y$Z_BqHid^xbl}hQX*ULo89Bnm4UIVJRGrHUOYk^jyCam{{u1g0CD5D(aK1c#a*0pLt*3*80NzRxe*JNL
zNb0e1fiqXZxplbU=T<9BOjY$|H2Ft9M4l%B+~7S!E;3JEft*=w!iav{K!3{Eo>@dA
zE#c@7Jv2}PsF;SIB!G%X59GwHKUJha=l4>_>`B^dCyeue;dCxnugmrOzB}Jhrw&N5
z9spu}^;AWkI$I=tyMbZh_GDaJ3TDY_z?9o4tMpe1aAi$9`{zHEzSuK)H^
z)ZIwHnUeueGjN_*0=h9U#yf8~i>wR-9(vkU52fkPoctGw$O)q-{b0Iq9J~3(!=!Z%
z_K_J3-C+tw;UO3_ct361V&e=nn%EK$|u={jlOSnLAB
zWUU&_?;r`xleDLfqB&1Gox9dO3@p8IyGyu1gA;|P^k6Td+2CUUlGbW
z@^3<^=+DeY4JdO*B?~zm>7#wV?KC0D;O?&?`nkJd};aek!bkGHw`XGJleHh$Y~as6xfbJooPV6qnC0{Pj9
z1^t+lD?t}G`IxA0EPjC@|KjKpLIJeU>$R?BaA$_Twr4Ln3B3SRhyzGhITt6pAeSz&
z@q2P2g>fd`3L2oyI-AhtGod0trwiXsTGsp`VtK9r=X*}R`JLi5ohk-N+{O~YN<2zv
zm+*Pne|}z#<;JW;{>G2z?^9vJBQF*_x;_((YE0EgNETE)R&;!T-gL+GTDUbZEhM))
zF=Sc&I4A>?LCluE9>A5O#84RaDQdSFY&cLVuWz;RPxl{4j*Rnp^!QE1_u4`s3gk>>
zhBHqp!%9uf|Ejk^f+JBLWWm-?OQGko8A;_Ib}`YL)UH!y;TrD|0s0G)iN1Z0M2h|v
zu+6%6s_kDJ3IvV3e09&B$lmms@;7dZXka>w{5s$3g+ycg&PzDqtIRo&0~Z`77v1I7
zVw`}1;z-yGhbv|F42%#g3{Ev5!!y4v?j#?MXnGfTXQ=<7cH&Nmt_g3eM21g9qjcFv
z3etIx%=owAca3=93uJ+9C9&xJN}2v`C*B1~qd%txdwrxty~bpZ;OI>hoVTaX0s=zW
z=tw=t4I>^K-3K1|q`9Gd@mh?74ti7X(M(pnejs6l^S#Puo|b1@C(tkYVlM+I2(XBP
zk3u&*3X0;B-i9JJgXFx+AXE85L;W^!>V&vGo9`j
zOxZ=y6_&eDCFS@O#LtF*Ah9w`Yma8XBR%htn8$e26#p&9+8Uv($GfT<`}7#9Ws6O3
zqHFPbw)>>k)Dj}XPZ!g*zZcVUAe*=@hzm`*N`>Q?#%o7@_>^gD*+8ywj3qz3`IN&d
z-Lq8c^J;=x=Yb7zyvlc?rJO6qJPJ=?;pgG3Wvh!V5KR48enl{a+v6HM2LbfIbzKyP$azhyahbC;
zfG-bfhax_Faifbm&Poj_^eGUwmEDHVH#@1J=Gbzm6W}BpW}u
z*2LQ_3ANP`Qyd)ql9TNFOX48kLbBD)z~>>>98w
z=S9@mJ_`eU5IE!B+s0tw`8aUsO&c3}bV)Nhck$CCH#_vwQ$F%;-d&Rrw>B2@bquFa
zfq{h~2~H)GI4LMB;}}PJH5&aBkCYJls{V+6eX7|j#6+0ta;rG2_#hWt*Q?_8qr@9T
z@0t2)Zlf>rJC;_xz3q45yuJr7b|U*jb~8pB5uZ-Zdr{u^Y8+@Nkcj*JqiUAgAZH(S4Ma%@*j
zn?#Rk8s%QCw;zs=w(f%b6c~z&O3)u`ZVJ
z;-Br`F5kftlr5CoijjINTz$+js{tZRxgcHRx44_y_VjfND${mZ7H|zlTs>lICGCI_>{7N!
zD~EmO_&Mg#i_<|vHo`BE{F<+LwO<=m9}&QtICQ`FN@?E8Qj9(xn5>qak_g{93jawe
zn8RP|Oo5llu&JW)qe`+wBOg=U(bwj8BA2$D
z;`8IB=4<)BPiMedT1Yx-qPQy{<0m(ZMo2x5lJ~hXtbR(68T*Ag9Gg5XJra%%=&+HE
zLWe|Hny;ml3U!6d)+d8C7{cOjqnlTXUUo~V_TnQC>n=aMg
zeK!w&mi>n`gDt1r(uGTsEyM-^M6f^?sV}7}&KF&y@k>0Rw`>$=VuhCm1QH0h+bz4_IZ)l3*M8*==2Nm8&&!K`8!n>OP?8iPjt3XBXWH6pq3eVDhZnzgXI
z?EV9M$}j&F??5%>9r{DFc3UXmmImZHidIBNr32n)Wp%iiPTxsyzr41~H=~NYxU2yc
zCbWhBjxIa<{^51Wt{T3L4$3r8S{UHHY$No({93
z2QXPvq@IW;99t^3F_t8zzqMdp&yq6x+Xji~7VW;A0k7zxzUYVe)zXSZ!qad6$PQ&g
z^xH@|?`nsB82yWLc8GkRK0t=%*)S(3?m4=G?h3FyO{?Gk9rTKAeOO+*ol}l;EfM
zP>Y|foc#}S;(G}PuRKLrQG$xFn|*IB+6|4&8ZKPlQpLC++}R%ouX+1v$6aRHP&Ea)
zlpe4da1*=@K0Z+ARDHOR13H=$mhz3|qn!5CWaa@-(BSYMI?7s2^r&0A+!c4uPD1Op
z^2>4vfy(R3eTq)VZyW&5T1v7E2rBQ)9t~j6)(E{zUgFAHl;Kt+|efd3kck4MmC;5hS<)z+4G0U0r((*Jjaq$yu#Yu2LmH&w09D~5d
z>Ogk8VOZZ+kPU|qO4nM!xS_b;Ru}%hP9PchiV0VvHQmcwK?X9RZjAquA
zp}cOw(>wWm>GRehj?a}y@YJ+s$7;sFcKCbu8ZPbMP)o_2$*fp;p-x}-GUcF~FIH_|
z>o%s{J4}JPGv|l@UM$+L7A*ByS~FiFT(M~(STlmi7@(P+$1FrP6KraqHmM0BgwkF%
z2SW#f{B`Que4m&C$UsDEI-3_@0Oq6ZrQu6
z)nidYGfCb9L0H{}zlTY2N3KA>>*Z`_#fHyCJvijb0znA21^Fpe9t8G1o*~wNGD{*-
zdpcF?>~H5fMy*fVH3UB!l4O<89i9>JuTTM{#+?L3Y@uk4DOU<~fIc!-s#rAZ!-g8Q
z6JsiNK#+fbU2+>~h|z5rKn=2NvXdKmt<_ptaY!v~v^HEY=`|G2JxHHZixPNZ^CApK
zSXbG`vK1~u{Bb!ALY||iP5U4!?an^yfjd1_q9b%5e47?lm@wF9Si&Y!QZxAVml
z$C{ltn)P3@5#wK8AEh6$g7(OB@^$x9&JByDo#dzGxpI^b8R`-Xv9*;7#1~npkN4Kt
zdj-g~003#z2EqFK_0oW!9ldZUp|PQoh~`-4rx$NOfV)DSy7qy$i0S)N{>D^vm=t(X`qZgxnaT>f{CcM8>++U#J)z
zFdloDeUj9@>>=HP92<6z%wW-PsQYLUeokeVSkQIQ%5C>1moCZe4fdd<|2Iy|brEt9
zJ6%WY@z`S~;HKQ{N^b4T?^3Lk3_x)NT%~w`xxB#%mOUk=(#wX=}GT$fkSEoqhX4aDc9?80wV(1
zSu{qdeH4Qz$0uRV2Pa^1LgwToyZol!n6b00p`_8taPpX^F%%FQVTIPso}%
zB?Icy^5aiWH(4ti`U*LiexBQ^gr~HBjw9Bs)
zT}g))EY-;0O=@GchQx`(@_WG+@*dld42>MKiK2qLCh)hwL?JU4t77)jSUAU}hSBwN
znkituCHxHclchq+vDd|Ao}Mq|BTlpox-O)Ut;Y;K$WL6E5UFo6I!@&K<#Di*-46fr
z`mmf?@E3UD7UN%FbcAiTtOX^jH;WQ#uhK9!d
zKy%i7M^O+jQ+@5cSc%A^R^nm@4)sfCZz96R%R!EL1x0Z8$jD;=tXhO|Wl7)74M>4^
zHD)CR8%p%QzO|=qB*sZECC9y~{BWUqueWBF~2+_4UOq$L!=Lbz)1*
zDL1i;t5{71Ct!~Fb|6kYh+|U8SU0-Q9s`=ic8mDJv644}lHsT#%)#Wo39fXl`>Q|G
zY}u9Iq5C!jt~7)B#wipKpNLmQL1`g2P(JB79OVAmyYhBlB9B`+YLEPs{dbFFop)$r
z3E|$`>~2#yv+D#t=9Z7F9b9@Nr)_`87RZ?VYZx0?Q*>4cdXOA3*u#NxqOwT%ijbe>
z&Zx#8i4*Yjel@}AiPH_?7F93CA=97Mv`ERtBJowNgi&W7rIE`-aMp#|ZB<0ElrRhT
z3oMpO?x!=a^fByWNHC^|!l2z=S(dvWAN0R4hk+=V`Hxt`uq(1>Bt2lN8x?Za5rltr
zYJBhsZhbmt`{?&j34d$Mj8guwSs}bp*NGv|wd<;M;yR!&F9GAc1oH|~hGM*Bn_b|5
z521+JdMO@ZW^_Eh?ef}pbUlCFGDTb(Kj=HvXJLrE8Cl=xSP2D<=0#q;W`#T7Ha6U6jBCEAra-2cy`C(!zLG
zAdpjiKqw1}yvl!*vzsOfKhCPhcF`M#%1W<0;iq``HP)_Ize=PIDn0Yv=86kWvVOrI={LCeVH7U|6PvO+u51||B=?YxS>W|
zy&&PAY#ckpXFEmL;qiV1he%qT#JWj*s<-D^Dc|5o@JlV{W#c`~Qkv(%2F`9Id^Qe~
zYIC)|d@t2*f^T)F%NY@Bu$yD@gPHZhWnYn6ExTv4hTmd2K~dOsyX7ewMoI4ilI{ff
zgD3X;(@Dpt(irP;ncwH;T#^4b5G
zKatoNyk;bMp6ZhpIqIk
z{W*csc7MS=!t_`@0o2&ykyCR3uW)^cOW{i~R8>k5rUz}^{E~b^
zU={uoGhxar>NSxKESKpEZ5KlTf+y3Xo#9PeM{TFCfD9AzoL{E~PrJ~|HaPP=z(Uy@
zEYp>-R<64|yR4~%24xSlTHLEgb#wqIblsx=BArpAW59-ipy5JZCwpEW&NK>7wB+
zF$MvKs8hQO1V~plv0?o6O$F@`q)_FsKhBb3MUMC?<$Zbtbl_;cjZ8|^1g}e;+MI-Q
z%llC%pGKHEniQ=MR&>j>#Y6Z8MIZ-MmE@^C*b~#0ttv1~nN{uh=E1e*TSackdk3Fk
zO8fF89)^=)*-FU|*hXwD{}7^IJ!8fQXPLutEu!1a_0iLnITVxN!~bn*8S*m
zGUkC%!{eag3uZIrGa~?=GrqB%4C613XfO5>sw^8+8Pu0-m#22({x1Nd6C%-aR;1~n
zp-JNvs&uPUb&I>Gp5QTY`CP!tMDtA)Tx?%F7$3xmgio^25l4Sp0oBy5na?E&V6qB~4s`n&j|Juqf
z7N2UCP9?H?90uMec7*T1{*to4ai}vT`f*>WbLT;!zoDG?i0z9bbK0FW7$EcZ0BC2KVMEq^Xk`
zs4i{?Z@sjm5`;@Z%Irfw%njg-rSz&U$#xY)!gU-W=vdSJ9BdBSlUon3NVwsPOE&mJ
z&7m@duGVQMX@sYTZ!s;@x*yk0Kb%H*v{_nKASV9s6ttmqBP_Z;
z4P)|sBgXz*>oH>r1TK%)@J%>tKnfrRzGwoz27$`t`<21@LqL<#rh6uI
zz_(V<{Ag6Y`viANGaDVF8oS$DV46$8%R(Abj#+I}($A?^u8RW+OHlxPN8D)@%
zgynO}#uJdR{dK{&UVM=|6W|xvB0JfT!PUoQzy1nq3udBHB)mTzgKqA#2f<=$6Y+$+
zm{MM;F-k%t>e`KY3LmP-!^wNVFZ8rt@3E`(EDYep-9C~Vx-mR5h;!D>fHJb;lqI7_
z3XqGQzKFqu9X%CJ-NEYK-TvPx@et119V6MR#`dsbuo_|g-$Q;pB>)TYDPpeI>g%c;
zp2O$h@MNX$7HT{wM+TqCR5}iE6<_0%WuU&W7Fo+ycA+r8^APIvSXf5bBo54XQk)rV
z2_Ey$EzcAfI~JY`4ZP(f&o#e8qZ0JQM-tFf?k*aTbfNGw+oRxZ97)xUehz(Uy2X
z=J^N~bz@XNT#xqmfK7PK{^}UU6mkKg#DE_{!t`!`aSppnnQ^NsE~)?)d71IO$Ip!
zWX<(Oq{iytgU<&=)>b5uUE{vZAz_mDr#o70mHqgyOzImc*5z-kjWQWE!GH1sbVaRXKeF}5#REfVeMj=5tyl)F
z_ARMjI=sng(_rvhzHI|0a64=Idr~e`3>#+AwV)^My^Q*v%yhU@PsC8Wu#O|R1*YaeZy`!afrUUFI_&K|HUH@he@=5&H
zO`?FRvQ%=|wF3Mzb*#fx`k-Wy?b+fbN|fJfH)Y&Sw1C|zU9y^Q2>9NMt
zwq@E@WdBc6Wdu0pxZuk!G~PT>7Wteo*SF3I@&i!{DV5U?Mz!brFB-N)xTK0*F2iaU
zziG;gdzzwK
zt_-n)=726-!?UrU@8kM=BX-<}Mt~3!-!R@s+nI;WZ_7QPOrLX-xouWnO+cSh72+q?
z*+`ioixtJ81`5(scyMF1?UcpbJeX(w9*BgI2Gg2h5KOr(dgF9HgE1V}`aJ)})!tv%X`S3CH1
z29{kUJn4-Yzs;g*E)&cBVX@hEyflxBk{UpJYgjYm3U54^_#lT2-zx3F3`*W@7TZ1G
zW!MW~&dyPGpBjEk2|V4Xw0O)n-mX{4#$4Jnm;k)krQueYM(R0^+G*I#NC5v7F9Xy)
zDsGBQ!O|)DsDicDVR|maoc;=h)2wph2sNlpl!6Eb^}j8?$7SORac`%5GZPIs@l9VG
z@E(+u(LiOoUli?7@Qo#A+GpcJ#|~{H7FGtSI=@3X^4X_|ajpfN3q0)A@2)%S|0sq4
z1gU#)CqOto0gL(nb&Bx6QqEoU1~iNIBbEv>oB+}Bl3h*_K~wcJEO_ne3vrUX^A8ox
zU^Hth^t|h8X%x?d!b`J69&NJnkG1FdEHgRAyKY7AX`cI_DS&gH)4Mc^Oxe^}wxbD(6JY_CTvO?*kUI&7QHn_v4fwW{sZLuFox5r}ATR0FS2>c8ytal&b-*l}9;V7t)zRe#q`;OCr6sPjJY|;&=~TJ8;z2_dsS{Ze
z7B}aRur^ciT1OZf@@=!vV?@u>)8Gl;53S*;vm-6dwXl!v4NX_2sO){-{-qR|enFFM
z_|3T~rF!Ci?6DjMIbQA!^ZaA6Pe1>~bC|0R?6Hk0RC^9+)H_P{DH!iE)1F}50?qgp
z@JW?~QF^pNF@^_BJMOch`
zUMg0>@rHfmO!>Zz+oIy({yTX$`>0e+=b}oE!NKWhC$piTGUKn#K{{H`(m%5$PAqmc
zj|OaboPWP7E`%6YeHI=ipFZo?-&obrl5Q+-;ZqQIHq=8#TcxoC!3AHT+LKADwi>+C
zo0ehF>&c!Yaw8$rMSZ+Tec08z`HP&(G8my+TGSp48_IofUDOOzCqB2s-6Q*fp-@1m
zc{uW*1Pr&CPa3n+WGUH=YIc5dr#?Cw#CQBt&hsDPy{8MaxgFD2lMgZ$<)IYzDCdDA
zgSKu9;?*dgtrVK6d6o;okxJqj;j-`G!H!&1EJX!oah881x7`SE;~~Z@^q+T
zoqb$Aw)}V$;f_7*3RDD9lEQc>H9q8il>TJN_^QZ5WzUT0LN#Jwp9+17G&NUk(@24R
zT82>o#9>a@HVyG`G~jh27(T@CpE_jt%4X}0VvhZwz#P%!Sz%a4S`_;Yd}p@fh@X;s
z{bkzFen*_zm?TYsYHRSKVwOo!;uV$*ugd?Uu>NxI$6o4Wv5%*Mdhupt`%
zM#lPpd0PWXkgaq2+$iO&$A!zyP(w_U9!DD|;oZMklaH=k?||CfaVg
za)<(h9I#I}n%Pau;Q#GY_rXvIS)bo*Aqc>HafBSA3;E?D#Qob}u_tY?Eic3ZR)<0X
z8!g+97;&ElOza!SlYpxB%idf-O9nUtJmx)PkRb9hnYMW6JS@+F-4zS!8QNNrst
zoUr=1qJ$vQl-xH@*I{LDEDC7f?A@`~`DV^Fex^0#!8+dma{qAW0AgC;(b)g&pLj*M
z_RZkw!X18z_nfza>K|apND`|g)BfqkH9_P-ag+#E7*&wh^1B`=x{wPDu!Y{UhqwL|
zv>&5=x5)6PE=pz}00Khi4-8{)IL7rQjT{afZ@m9^vqXU;82os%P=dN%+NZwBfgLdF
z&Wuy7l&osO7S3OPr&3T{3z@_cAu?Ti8&OaNty&wfDpmIS<+PX_fV|P5^qkfm=rPJq
zpoB3zmkv$cui3do;fy-43_Cb_)SRyMxnHf6?Y!x^$P&$cabHNgWfz1Z%2UMcQEnZJ
zbQQjd3*{*2`*~UAwGi&C)hymKC$;VoaGB}Jw*o=O`3QZtXh)#gHnODy%&-B|3d-ZY
zHWF?c&?pE8Iq?XJ<8(54&wLnh=qt()U_ny6KqM2%KFm8|m+#{Lxdh~>5t5XV?q=HW
zy7Sn-eh-p`i-w#wNuIX$%DMy2>NPrdM>H?PF04A~149){8rXc`lCm-;r#x(~2-%z3
zi{7fKbv`(8%%VsxW`r%?K@$58vX(<_4;Y>H#8TQGOmtN2DYT_Ud&BU3mYTl!+^x^M
zA{4&wH2Bw6k(juLU*CFFuUFN2`CIvd(r!Z3tw6p)htRe1g*%3nO%%7l{Z3sEqv9L&
zRm_QPXOUv3vq{K_7{{cK{GP2Y?`RkPphYS9?mOgToH6ZReNq@X|AwwEw9w5|KNpCW
z4o>}_qDd&8sRb3=WtR`37l`yr8L3f1EaNCC9VRemLXVtwm0zHk4}S2&diCU^Xip)U
zDO&o4_D`cohg+85q(v^P=9In2WTkxl>b;_TUHSB~_*&XBuJxMf1Qf>4jG2bYY5#ot
z@{*UJY;IrN8|m`z667+UKnDt*yiiU9@o;S!wPDr8RI_`PfowFthPNdS+R+A5{C|Rc
zRjV|RT`RVu0v!g%3!D|U-KyH()mNv--#e4BDSa=c$$g0xkr3`jL!ABB%Agxb7-Lz3
zwdV?Oxb0j?Ki%l$f2;`#+sl?xYVEC+%GLc8P+Z15dXi*8>)qJyDMM!{hq%Q4_J)J2
zCN#@E67NTXSIfJhox(R!Dpvc{+79}R^`NgEi#TJLrNPE|A-!kWTrRSV`83TQH^}#S
z{QOFdJozW>@G(RsMl(JI8nxS#t2?;qa@y*w#~E{}zH*Oss?h81gc7N_Sl+K6Y>AJk
zy4hD;S=j3GF!b4|NbvXpk=V#x9?Y*Y+v7H?=isQ@AEtLA5ncAmlS#w7l+@etOTP!_
zqH)!$c}Viz7`eC3)P8S^)~bFsbigvcml-A&HT1SMw%=+>(PhK1IN>LEfd2?0g}~_F
z`yc)z%J+0~E>kM3=Kn}7vi*YlqF5kZSS6vMQ-!3#9!fuaJLRZs{#6>sx_rBQ^7Zp_
z{<3X}1!0I!v}G`d;f6rrSk+2}2opaKx&EYtMY9D*C-0nxQxTRP?Gd`>T{Plvz{>-v
z=D-&8U7}!5kkO!Dj5+yc809r-
zke)(QwIW}d9hYi>4%XPt6=%7Cm^x@tAfc~fiY|6_epJU%in;acy8sz?R-Q5Pwr79h
z^u347VGSe%Jx=5cqgkPcpSFG0XZx~EnqG{!9%juYD(BVLeYf?&t45I48iZ(g?OC^?
z7}sJEXxx(dw6gM9%MiJfq(V{gVW28XrjJ|9v3J6*3X3{@89W09VZt%Lc$3FX;7gM0
zvAUuA9E|oITk&qrsS7E}-(R)s-_%eSCcwf*o?6`HxgZca)E5H=BmWT2a;XNA;!8z?
zZOuXp+oKq9FbE^KsvVqVT3;}yt^kty!>wDkdpRPT$NMLY|zHbthrS9(F}>2esK`Vd^`PyK-dO+6-^nWwZunJ
zm>@Ky;P>j9u=Od!ZjwsWCe90IQ;0cAQ5jX=&Ku@fC76*n`b$mO4X$0f9Cg8Dz{cDv
zo>8)X-z4RfRCBd7Vxlx807p5miY+Kgy}0;_(LH
zUw`?qp*(&><9f|nWZTntS71*BQU`3yRxDQz)>wRlpKr;2lm5V-a{EhWf
zT2}h5`9LJ;DV}&?U6e6keUv=>YwXi@H#-?XC`#M$o8gqZOr*1JmaBqDRX$7Ak^wvv
z2k&+T>1p9_$`=&2Vw_RNkPn2^2zlFc6&IbLi9hF_m5SHva||1f*~W(kf`f#!9s@3d
zGh1?Mw=(ofBs&-!|H}4SD3+evSM2rVr5U?-MCY?gxe#Rr!BO
zY)evf^s?s?>j%fY3Qc__h@K1|EZiEtTa*e2;t4QL_XG=@oJ2CyxY*N+XoZ>@s$|}s
z_G*|vCkTovLQ|JLomQdv^J$szM%nY0;amMH3aFNTO*C(&-R}u!E30ywQn`iQAXcHd
zc|^pLfwGTl0*}o=jr!y0puYABUJnflGZ(87n_iSWKZhp{Bb#u^;E}0uN@fNbCjIw>
z_qZ(PN?rrOj*KWjPoSbq_b`CYn^pT}f30mDX
zjwj0labeR#nXXv154>{B7yH1pmH-{R^B&yVa21A;}ZV!==18|zjlfc
zSK$dpMH;C@V`X_Y^P=LOBq&hebr=H^*(s)0vW~
z$u{$kb?j$%`MU;#0cgDqsKhM
zsvRH`9?G<0;x@E!cG`U7MaF(?BD3zV2LbnPU_IGNF=;M4!lI}Egx3dj1%c2T3UkwI
z+f?B7y35LXSIQyn7H}W#%(VH}3#L?vCNyxqM$%lAd!b{Gr=KZ7`|e8O{hAo~43&nl%*dU$
zgQ^2YgSr14Vu01mM)wrJ_3LNFd`Vz_!R#XKqe^*BTN9qo*#l?sCAN)zd8QBt{4*Wu
z$-^SnO=}QCBh(F5`LJPF)3h&&m-=iy!HH&NM<(LO)0aqgzT!=%+a9(;B_rm5{R8r0
z`)WY_ZH<*;-ya*usK9~GzdKKu+8F<}M4?a8>K6qo>cS8Df@}tl08ypn(hEl0u#T;~
zU{HegA6WnUytbc$zaVyk)Oz@MsR&X%BRG0lg(=!*O82RdD7J~E1Vwx-?M5qc8&@Ei
zL)qdss*5dGFm0mS=BSl&n6KQTce3+G(S$?XUE`VVcUA9n
z7`K}^)MV6byJl;I7hLuDYOFLS6}DTZI24H<_A|a}YRJ3lxi0iGu1Pm;Q+bVCk&~AR
z?an$SGP@4TiN47>p;!mhP_2M5j`F<%e%mW6(1!34J;9KN#dyPyt|HUpZ;~NJ@^R=x
zr-lEXkAZET=6NMy8|@rumEaarbs`2JA7Q3_kt7|S(RdO^pOQA(V@G|%_XPPXcVZj#
z@kEKJwmQ$}RVsMUk%5X(CFhEJHTY|0y}c@U!`8<1j?!WM0Q<^K#&!CaFMgmKjTKH1
z4#=22mC5pnpwm~fZN>5L>dV9wOHH&^8y}vZ03jZ^N{GPK=#qAqY%5NobcagslMD;W
zmXx|hiIx(au(iT)c(dQvx`=e7sL-ngF|-#3zd*^f7ZvJs#(v~Z0YO%RBRm1qF9DUv
zu7s)#PN~w(j}iXD-G3^6C9Vd%y@TujK0SZ_bX9&bbIt)%UqJ)UP3lrzUuGA&o6;f3
z?=ENZs&&LEnfLx0vFf|B;&qVzS7p(9t@W1D-AAdEcfQLkI?al?W;B<+d6fP}`+12x
zm_98-YE4Vz3J<4cQnp6^;67C`|6e{AA+U5a6n=7N^B8F
z|1`yVlhF{v<)h$<~an)c_;}@m2pXIN%Fh)@bLw{L%
z5RZK3)#53#MtV9w-$Dca$O$u
zEt-Q>Tds}iqQ5c4l9U~0d#VvuY>!~yp9&F6Xwwf<4Zg6MwDYyUS9?t&a&sb8y2s}{
z=BvwqQ*u-a)jo}7m?JZ%0zB~h6>t0NsHjmvn^v%h
zJ7Jp}AW-Cqxe~|c7_=@Vz`
z$?%WZRVo10EP~P+-GrPV3wztM}a=+*_C%WQ=*f2G-^>i0AOv!zb
z2ecAD^VX=h)-u;#VP7C16n2yts!eBWw48}Fh`Yd$C8AqPC`d3%W=w!{f*-CPEctr_
zE;a96Vj|4f)L*5UQt7&k01BJY^>r%YN-WVjfx9Z5-JlcYHnz7O|Dy=4;6{q!0XFAV
zsX-T2>MJ1}Fp}6K91yx*tQZvtU6?M=fs7{SF2A5&a-=0?GaGq%xf|vJRTgQ5%oCkX
zw}5AMigaoiaLD1(r^yV(cjsZK-)FkKVhFn%B3JkR7(Q?r4)mN4snI}-!
zhFD{EpW^)#FzGLKGUb@g?xYGFu5i}eTsFq@#0qE;Zyz*1tn;WrKJ8{w{$8sXmvMlL0$YMfN&C+LSd2gMqxjq4xxH#0(TFL8iz|pPb+k^#01NoCqyR+&dD{1Kg$-V{uYVv#|E-OdCl<@-4Qs!I^rLzL7~jR`5BEMxAL^Ne&*A1E2dd
zOZTIrdHwvIh_bmt4=e1~uBR@-kuS^Cvm6aGvDC`_jK}8#WsWh9*QH-}5VrAACP4>q
z*0V*G`(OBebf_sbLu|gDva+uBXafoxwpQF~F@Vbg+`?Yur()`fv)78I*w1cg9mF*+
zPoTbC9at?3<{UQhYU%BnZp&w^dWS*XNqvPcYIV&Tk6ax~a@w#;_xzaHKAti9O&Kx%
zEH6be#2rs}R&QKTHb~+7Edu)pL;odGH6lyV!X)Q1X_rlFU{57|=-4=#fPzZWC=z#x
zvz&ZXFe?S+b#3t0mt167Yw_pgD@J?GK}HEXxIdvNYWKhNFPjB2}A
zgzqo=DTD-EB{mRUta}}f7qIARlWLcjx$mVHXdbm?7fucXS5BVV%_G{CpY9)T!rVdM
zcU8o_&w3EDGAab}eU=J8p4Ldkea=~_Qu!fKt&)FBT5+D{OQv%EJ3S;f-q=#;B2`z)%uwlt_9m+mO*}*nO!Co+k_L@;+nX6rE3f+(hKYmmfHkx_amEaD|3l6V|DE
z01n4q9;+l6AG=<2&1djpgG&!HuU>d)Q}vS7X3c*gpBr7y#|7Z!HVXx{4)(o3e-*%$fg
zS%xT!bgp@lOE{~zf^H_Ko(-ip(I1^|*53$Xn?8Haw^UQS3N=Cg>p$8Gn2xG0U$4H)
z5;F&8`F+_KvzYb!OW|VIFbg-0w6e>584NZ6NZD*E42O7pk-uVmfrm(@$7El
zR)%iDuHoWXc<3|VJ2_qF_N>glN>EyQhr;<(BpJT_}C&ET~9K+BhIDIz9T!
zhjx}^V^sYOLp-;Ri-l-eNXj**piQEE82SbfVZ*&la>y5VCC594!$tZVLhQQG|M7Yw
z{KpCaUb02>^r+h1e%B={XChyKNPXuDtM<^-;3RhL?&`KnmgM6w5@-@l#lk)unnRpX
z?G*sBQ#XwEKPNBzreQTiCCrk{TtD5r<)oBlX>aq|Xv$eR)AH$?mxb+lcvfPH=p`m`
zEqVPblG1nqgQQ_B3Q9g7kna5QiZ)3VTJ3!=lC1DzD2)4w0*sZCid=>l@rYYbqm^
zL1W2s8^v3)aW1`$DAH4|`x=J}ZAU~t#qHW}B>rl&MpBH9Fs>;$PmeOOcCZO8FB|3r
zf3O-76^>y5Uj3TRCT6-3cz>pc+GtT&70#N1F+XN7-lOt5$D+sXK>i&INYi!>IA^Xo
zmvxs(*|kwr^%0Z^oA!Z%=O9gN#-K8S-}rZdEnj1(z6}yB9PC6kP?uG;t>cg%zSJ-i
zC+**ngir%U1s`0|HgkxSepDss#j*Tm^`8l1`Np%Mp?8N6P8O5cf$2NDC+_I_(Zs4s
zw%gk)+(j>t^x{~?4*2YwAN3fEq^?L9r-zia>`%ux(
z1$v5k^nbfaa9%!
z;TClq^9Ild>hj;r!u)0@H_-3X&S-7EaBsjQBe0t}8PkBBo)^hk1Niu}g9Vcq*OLaT
zu}Dm3>{F%Qt-O{{z&gYGw4dIFn)mVpTgO`&pJ$s$s
z$1snQRc4)SCZyqjbg+H+4kXlL3inMg3@<)rr?G+p)0f!Q6fQ401@(_w{4ziESt%UX
z1RotC$JKCpZ;nfIy4L348a~}4af0+Xaw8QD{}>MaGmh0^Bth=^F4uxvM$p-L*2BiP
zf=_)#=!YTI=;(HxZY%!plW5L~ZvQhCGs6D*FUPG@6J
z51iSAkI279eum7wf@&>1X}kUZV8yHUOqK{Mk4pe5RJ_E0x&b;0HUuizM
z~3cilWHC~HF%iQPL-LFZ2V$+zz
zxFepCZv8zs#QXM$n#ZVnMZM%T%8N+x>UyJ@^2Xx#jAa;~{V1^j*L(ok1t`{9gbTs$
zJitD;62D&qILvUg4ybEOwwpdh+0flGxGSQzVyy
z8G^#Orn)`FEg+iCp(S6WVL}$+))=AZDwAm!uwN6=e1tIgOEuht2qy1B4Yy-8>BCMr
z_?<*GX5N=#t?EGV&EPc2NRPxA$3;2GjTRHC>+mCb?N0-pdCT&GMu~;uQksWak*ge>
zpp7jTV0jh`J?f)*u-QH&1hvrlk7???1SiN4F7`}n%xz-SLB6>(
zkfv^#{#1$^n;MQqKf~AoJ_z+FtjcLGOgyF(o*FDfws>*EAK)}-LYOHm`JQ-%7BT~A
z7gGifkAkZv^3x{H0-v_qxkS^Ke5v_OGAHDZDio7>5BCm#3-ZEEyVvWzy=1{;97m!Uk+
zlb%_ccSJcjyHodT&OFD;4$OaM2N)9pz6#`vu4)IRZIgb_U-2$lA9o)a)9(>@T30iK
zz1o_qF}{;c+GuzNaPrS)rBvp*XUC#Z(pK^$V(a~ywpf~4w*D^|cme0d`WXR!DbsOllhq3Z{i-fS<&mLJZ=r`Gsd
z#NiK^-Ufu3M;*+h4@p)$rX1stF`W`+@61auFrc5;-^i8qiu1XjpsZsae(O}DVfbqy
zg`GjqLMJ@aVi>G@u02J1vVRWlYW$e}kt(K-5wsVvCi1Vw=im_FYTo2~01N#kF!wKR
z(kc>){nflgFn^@pf2s0i(#wOv+CXFLpnRJfrxM;N_}E@i&F)N@r(>bi!5i!OG-yZk
zWQLuiRe5ePG>G0@Xl5x%3nf0EljZp3l!L#qO_AEyxer#V?N&;6+qW+xA5xKKy&d^W
z=TA`LgA4kxK($R3`=fWYFVvd|j6!CA@0RUkFy5t^Er4V0P+LnQMm)sg2M44Uy4k&!
zeuO>I`w=+ZVyBh=Ipu;CnmbZe4J`yi9@-G3C>P
zgL%a*m1Dmrynk%HK(pte1RVLT%=29!vC$ypCxN(W>ztDd>CmS-p18CofILosvi9b9DGpTSq9=Sjz#BYEpau-SWRqe^j2DN3gqQFEfH@@U&2b#f98ef=0U
zzvsOV|7^KdF1J-XG}_G)87l+*_e9n2_wHY(vw|kFz?8)AZ{ZuQ&$1^3RIeGszCXz~
z1dFmpjV~f5i_r*u(Ce(S1>HeJe7y$jz6gQ*=IL-huB(ULG-PxT$1}-?hqAhV@)(Jr
zhD$Ex5xr#kl~mIzDO=wep&|B%Wy@PzcD99m+ozwsi3AE7Y_T!y-W%C$XSk)(UNbJ6
zc}SlZs<8DEq`Lq#@c^`;EDGR;wMp>Ve76#xjL95G7?MC{u(ig
zF8hjUFg{0XW|J(u4o^@mYpuqOiq=uxam>cmWX_^|&ZfSxP?rDFt4R?eBMcNcF-S@#
zpKj*%CH!f=fyH`N?jNa~A|b(uCaw{d=m%jQI6pyZOBAZ2AQ9ugltBkz!ibZymZkk-
z!@BJaBAK+yn$HL60yspE{Io>^MHg;Ls>n$TeOM`EU33FnM&A$zIFhGpVkx5*@9J#v
z_ei?=`)jCdxS|ck*rwKP7jh(p8qA~K3W^r*QZy*|@dsb^vLUe|hA~qxX5u(2%-BrQ
zIi-hRb(R|`&R;Qgp$zklSXQWU#8}XMB*M*PMk%69-PGUn)aR_QVqV507zQFXc#N3j
zioDLalZH!?h2d7I{euHFxZ8^RPMzz2g
z8p^twPo>838ul5%7Oh_^F#`P`V{hRXWf-lC!Z5(l-Hmi8A<`YvASfL}qjZDhfV2pR
zARs9SB1lTt(A_bFAl;30-4ERRo_)^V=l^TwsHsxzc5H;?wd
zPZs$YcC?tjEf-L$+(REWD?U14M~No4r3`J74#U*-rPam8F;wztmm7N*^eTG5<900Z
zD*O6RsgfQ3;Ui7=;!+w7xHBaB_P5gmVAVbv&VsSrUF*Fi=ea~F-;i9LI=&0C*gGD9
z0&=M^LK0S{Cc8APTN{%-edd6dtsF_)@==SWo|pa$Y>5R)8b&hl^VInjNUKmzzbkrB_^E?1cjxievW$G+n`|>nmqoJf(1J3$XWSF
z{U=>7UUhh&34~B+rX33+MpT+Q|FsLLXeozZDk!B^W=#0`%<2l|J5Dojod|RJ6xHBy
ze#TDyyxy=RQ5;Q3X6N9bg^8Q;MW)+N^u*+8NTC*1(}&qov=?megC-EM$0BC&?7x^`
z7Z^`p9?;v{4k`Bna?1}c5)m!oPgq)?_fe_BlvH!&jSq4Nv>7+2?-%+zOO=q$!4JN!
zlgtxK=No;g`RJZc;E&9P%_5sL=!ctFXhS$6ob3`@;Jo
zQ@u}jK$C2jpOch!zxWUqT5euDHtd4bmyJ3y;Fk7h;Nig#mNe+{I7F5Jq%&4{yzN&{
zRJgN&O=RnoG}yaX%1tGY8}B)7+tWcQ2{IlS)!#v1$>BOWoUv*nfhaI`cbVP0|%0*Sbvl>8wbz8sS3<~Pfjkies$8cGJ+staqbkElNqUWklZ6N*b
zW1jn}QQIblPAqS96O4M8p`$uP@#MHHIs@|N<=R$ZK|W)ws5a$Od+FHbHFM8&b5j|>
zvIP{NuxHrctO>vE3#{<4Cw}-k0=HCc+l>iBNumfV|A?vB=z~$Hp0WY{+uEL=tte%w
ztV+mZU!6TpzyZuTSloIbnB0+*s+tihREGx?s5M!U(-rW7$}0|`_FS)*AE(Ra?y6Jf
zl=a6-`Bu2=T#lTx>0VQNp$4C8fI`<+@&ggkfD$kGdlTaO^?A^mx1$!2RuYdr`SFf`
z=Nipbi-U_Vbn+38uE+l9Q^&^>xm_#A8^g#@wz
znR-R-WZDezmvG)jNG5!4)Bz`_@hhbh0bmDjebVKDLAN5D#`--0Yx5Lt-(cq`u=)?*
zjRPYV{*vZ@l7!lce-%=bd2i1>e(7jYr;y0=&~+K{PCjq=Em)OhShGE(G+UgAJ@&n=
zIw8+yQDV1c(y=F}ijIhlQv)djbixS0%EBVyAP>^gZhaTz@MnEORc0W%AH7T~dFbPJ
zC~Aw24R*v&bk52$E|K~oq5ka+9v@G>%yH$#X`7XLok(mC#$_kNzEV@bXmBUH+t!QI
zKVepdOq~K)yT6@kd)H?^0zIKeg7L#nzp#0&P9OK%-Tc`u!L>OmN5sJ^1uk{5movY)
ztH)J|3SX^zFv3ogB8&c7BwiJW=*-2U^$vkMFG-rWg+sgQ0fIFr0x5+>|B|CP4O9f0
zP!S$DQeU=!RhxXD5@6t}*Cs-eRkN}qgbih#bhOqe2+{yL0f-ku;lYt~vhi)?PuwBo
zr(Px;NPP`TZlOWrpLiIc^Os7}37W@@3GCR_LpD9Hc;w8O1<=}m)MXFq;TIT(J!Scp
zTPl{E^8R(rO^fA$VOX|mZZH6PFjt?2ah{up_chy6I+;HwC`_&Tqsa5{lPW60h|L-YlD2aj_W0$T-DiVqKItl1jdmiO=@F=Y
z?lVcwOF{OJkY^o53IwcqjXK#^@lK?z*
zsU7BJ<@sa8iF$0SE6rJ`BcdB_^nwLC;l)XVs-vU7=0R+$`ZhUyA$yplw+)_a{W);D
zBLN?~x{vXa&zjsvYpYLhDKkO3znb;y)Q?|2%VDUEWq4G;)lWm}ZwR90=gIL=7d-Cs
z*qe95G?7ID-GN>Adr!AmZaB^!-FB5KIm;D6K2b3gF4)kdSLFOtV)d*>7e;{H%0N(5
zk^2L?@x0zuBgq~2PNkiA3VGGM(ht%h1IKiOup(SoN(r^Y*%nWWk}#+$N2p*;+G&(<
zmH}S_zNHZ1r7jQY2kyW-(R82iDoihl+F~Vm)z$GE%uj3DdhBe&$!AdcHqCtTPi8M<
z3q0?7IdNdB%Iw-AzF+RecZcJw(u|F$z=-ndB@kJPj1Y7qO3-1PHyf#DzW+(-ZguN_
z@)G}u=Pw9==?^rdE;7V@`8RccW*$j8s%bqkR}3P!gcA!3%{aC>BgOGC49|wGn)intPc@`E^`QDf$2=48LCTNIfdp7A6JV!1%f*~Fz!3_OSC^T431&+$7W^+
zqvOc>DoWFb1x>`@FDG%}moh^I?z}{*9M>oI{9KKb;C^E`B*bhKohg8Y<=rYGs)AS&zS`#n~6bh%O;4p7oo1gc=&}pJG{S2iCP1D$>7eV)p$OIbM2y2Qa+f
zx0J34r{C@`d*KeTd*dKOu8C^uO~;GB{EhzeSB=N_E4%Hsr?Dn{N-zZgBFlrJuGPUbP$2sdwytDSbQCt8u9B~bq2nKLNOjv&|I7_tb|}ND6FFJo$%M<
zEv~R2T;v2_X;xfy`eAjrzTk!@_()v?8{!E$1d|s*kuSY4@es1BD;%MU+^&EnCacU}
zq)$~VQ@&r4%@}oP&?j9HUfIX8bm$B$a@;6m3J}}lAPv^(+oEwUc9H@1r}sQQR0|>+
zOOrw*qjH%@K#^pXi=?5qCVC+#?9Z5ZB(H}swG=ad9u>17D{K@#5s_+opLnYszO}fL
zvRAJ)!<+Tm@Id|a>#2S_iJYbfuSVH?yXTCX!5Q%@@(@$TQ?nhn{KMxlt
z03huA)&B5Os$VKA9e&*$acWTXE{GU*u^6Z`#j(SWIZwj^&@i$&EL3l>T!}l|)HPT2^7R@f2>+aw
z$A?qiQSi0?PZT7UcmfwjO}iusM;u|cG%|-XC^3qAA|23sJkmcz}Jf^e8(A&@m}1ed+?{W(nP5zHbi
z-1I4_IRzMufgS7Lr;y&j?Wmsvw5LZ&sE-nC4l6!4bCe3-o$e7{yQmx7u4uC|38}75
z{2#QUUP)CqEP_)b{!hs)a_@Um;5uQzcP`OSY|i+dP{k4kriA^`}$76p;mzTn&L
zf>dDZTK>fT5A1%RtLOpi
zG81w-@~syi#rnGE0}4@C=A*fQvPOJPhAf3F|3P$}t#>p$L!4$6fqh+yKO
zh|R)&>jn~T4r)J`85*u2Pw-QFi)|JeX5Li(ZBo-
zHj8+%KYYSKPM@w3;l-F@Ze?F3#A7lRr!h1NmVJ?6L1TspyO*fnr8IJT%vt>KJN36W
z&BeO+y6sNjoqn*6YFw#@jm!=jst~^@Rv+!5w;y7L)6-l_yJQ`BXNR2atVnKNL=D=-
zK4!C(sj0^L?`*_R^mN-_c?c`$TP6jBHQw?Vul5cJ8NGu$f<5ca>}as394Px%=RDKo
z0F{_x6MRWU>A0CQNZE$=>f-w5>@^@_M4;Yd3&K*BciO$ow2F0aXXzAcvn
zz#=2x9>l_zp+#rE2QH{r9CIG@IvPF)6*SrZ(P0izB(SAEDzg$%>bkB_d509@{uD%v
zj+sv;@(z#nPAOK>!$hc3N}9C0(KzV88K>AHbp`9$l0M#E#)`lEMMoMdCG!C+ON`)C
zhJ&}U8(ciEm48+s->U|5>AKt|AX6?&B1jQ{Ox>eM=%yn`k42EO>J*IdG7XQ`6bXd!
zQhoa;Q)OR_`;@^?-jo*-aD~C8fpJJGO=yxLtL0zrN+Y)Im
zixUo)EyJ@7gmSvMZkWRc+ShnqE_Ic2xsDBEC!#Zv85k>{i=gQK%@#>bZJ*?K?THL*Y7WJc>{y#raa99SDo2G!*e1XkPy{iC{B?T7
zcV3x<8&z0sRlFgk<5{j+ABZ->a5o0bAB`~n0!rXapef!NKBM8=<0-_XkO%fchF)mQ
z#s_<4*WkivAae6|36-idFSg@?lq}^MFo-ERIEh8paSw~ame^L>P`Sq*ZN-eUKM6C*Lg9A3X2XRq$_$Cn*|C}+4zwu7J
zX(#2nAc;T{e}j?7jm(|IoEJO}>Bqj?Hi<7SkinJFO{U;MmvYj;6uNVLx=CW1`M26joFH2q3EFC>Xmfn1kZ619w;FN
zffI-dH?iAnM1g7^C8!Al*6GN>#ou#9Pe!Ks6~IFbpb;4g1_l?Ud2*d!
zS?EfSNx^UbAsE?CJHa1LnGa;8rFqf!pwP*+>~)xB3z
z%vFCgt9y09*f_i*kI3nE^HOAL?8Q^cpeYh!)nZl8nsV7cUfnfqMI4sIDc?ki%O&Hh
z$?LH?JRn>eS*vIx`WB-f3|P@s86PdFLEEymgxF{HT6~d)1pzNzGc=JU-b2E~Fzw!2
zrAevmYfUDz`jY^CUy=Fo2#I-aw)o<&J)#Rn;c)Ex0Z5Y1thGwLG(aL_SNrWbSZ^o}
zQDBcj{DShj45YGBG4TF_<~vqUhWL8PK@&8@Sr!waGVAC?j5*6{S;LzCMXAG@LM$^IElf`s
zk-?Ny9OeCBMfmglR~QKm5#IQvZ^~*vJfE^rQ)mC|P3hyfbTUVn9hKTJx`qiZf;@ZZwdJ2BF+VXZB3
z69&H=>L@f4M+$h0L36vwMu{V}gNeza%OYwtoxLw=`P)oKo{!`G7z*aGY4syiI@d!|
zFewYZ&u6vC@-3O{5U@`C$RUfS?RN!J`d~tV3=m~V_QhEnv{*ZF}fZ%%Y*u^
zHEaHV{C>uZ?cqrm!XC*oQn>G;?2a<_|8VfAV$)(&GD*2*Zb*@`)PZ_Kh2=!B3@he;
z{rAJdT$fwz5C7i8ki8NP?w(suU+{SN_xNc^fhx3Sz!-;bMR!k+ez)9tooJ7HOtoeT
z;PWv2J9vUn#E$I$esD>xhl9t#`>!SU0lBgN=%s>>pYkm>#>mJyVx>gZuuF;QuH67>
zKNPQwlQ-EYWyjg-qG=m^Ei`TRHEHNO&x@frt3Wm<-c#xrFNRWDSkje@O$^DM%yX>&
zdh|GvCztk}4@b|q`hHrBfuF81rOV3V12|0wGfkgR)y?QE{~dj}X~^*Mc3Z@cWvKrD
z$3Op{j}BQYK{0Ak;<;{j#r$