From b733acba6603bfd3a8edaf1f3fa5d80d02a4ce01 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Mon, 8 Jul 2024 17:46:43 -0400 Subject: [PATCH 01/90] Add model configuration for rest --- samples/rest/model_configuration.sh | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 samples/rest/model_configuration.sh diff --git a/samples/rest/model_configuration.sh b/samples/rest/model_configuration.sh new file mode 100644 index 000000000..ba10ff813 --- /dev/null +++ b/samples/rest/model_configuration.sh @@ -0,0 +1,30 @@ +set -eu + +echo "[START configure_model]" +# [START configure_model] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Write a story about a magic backpack."} + ] + }], + "safetySettings": [ + { + "category": "HARM_CATEGORY_DANGEROUS_CONTENT", + "threshold": "BLOCK_ONLY_HIGH" + } + ], + "generationConfig": { + "stopSequences": [ + "Title" + ], + "temperature": 1.0, + "maxOutputTokens": 800, + "topP": 0.8, + "topK": 10 + } + }' 2> /dev/null | grep "text" +# [END configure_model] From a4e501eafcc2402590cd3efcaa9e5e0bb3292911 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 9 Jul 2024 08:16:46 -0700 Subject: [PATCH 02/90] Sync count tokens examples (#445) * Sync count tokens examples Change-Id: Idd9cd0956ad9b2fa7d95d8ab792b1673dc1e88a8 * format Change-Id: I7e7ecd1b4c0e060ef91bad0f5083616f0394705a * Fix video file name. 
Change-Id: Id1ff1196fa8072cc90d1a3921846687687b180b4 --- samples/count_tokens.py | 220 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 199 insertions(+), 21 deletions(-) diff --git a/samples/count_tokens.py b/samples/count_tokens.py index ca42a1bb6..827fe5f1d 100644 --- a/samples/count_tokens.py +++ b/samples/count_tokens.py @@ -21,75 +21,247 @@ class UnitTests(absltest.TestCase): + def test_tokens_context_window(self): + # [START tokens_context_window] + model_info = genai.get_model("models/gemini-1.0-pro-001") + # Returns the "context window" for the model (the combined input and output token limits) + print(f"{model_info.input_token_limit=}") + print(f"{model_info.output_token_limit=}") + # [END tokens_context_window] + + # [START tokens_context_window_return] + # input_token_limit=30720 + # output_token_limit=2048 + # [END tokens_context_window_return] + def test_tokens_text_only(self): # [START tokens_text_only] model = genai.GenerativeModel("models/gemini-1.5-flash") - print(model.count_tokens("The quick brown fox jumps over the lazy dog.")) + + prompt = "The quick brown fox jumps over the lazy dog." + + # Call `count_tokens` to get the input token count (`total_tokens`). + print("total_tokens: ", model.count_tokens(prompt)) + + response = model.generate_content(prompt) + + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). 
+ print(response.usage_metadata) # [END tokens_text_only] + # [START tokens_text_only_return] + # total_tokens: total_tokens: 10 + # + # prompt_token_count: 11 + # candidates_token_count: 73 + # total_token_count: 84 + # [END tokens_text_only_return] + def test_tokens_chat(self): # [START tokens_chat] model = genai.GenerativeModel("models/gemini-1.5-flash") + chat = model.start_chat( history=[ - {"role": "user", "parts": "Hi, my name is Bob."}, + {"role": "user", "parts": "Hi my name is Bob"}, {"role": "model", "parts": "Hi Bob!"}, ] ) - model.count_tokens(chat.history) + # Call `count_tokens` to get the input token count (`total_tokens`). + print(model.count_tokens(chat.history)) + + response = chat.send_message( + "In one sentence, explain how a computer works to a young child." + ) + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). + print(response.usage_metadata) + # TODO add comment... from google.generativeai.types.content_types import to_contents - model.count_tokens(chat.history + to_contents("What is the meaning of life?")) + print(model.count_tokens(chat.history + to_contents("What is the meaning of life?"))) # [END tokens_chat] + # [START tokens_chat_return] + # total_tokens: 10 + # + # prompt_token_count: 25 + # candidates_token_count: 21 + # total_token_count: 46 + # + # total_tokens: 56 + # [END tokens_chat_return] + def test_tokens_multimodal_image_inline(self): # [START tokens_multimodal_image_inline] + import PIL.Image + model = genai.GenerativeModel("models/gemini-1.5-flash") - import PIL - organ = PIL.Image.open(media / "organ.jpg") - print(model.count_tokens(["Tell me about this instrument", organ])) + prompt = "Tell me about this image" + your_image_file = PIL.Image.open("image.jpg") + + # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). + # An image's display size does not affect its token count. 
+ # Optionally, you can call `count_tokens` for the prompt and file separately. + print(model.count_tokens([prompt, your_image_file])) + + response = model.generate_content([prompt, your_image_file]) + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). + print(response.usage_metadata) # [END tokens_multimodal_image_inline] + # [START tokens_multimodal_image_inline_return] + # total_tokens: 263 + # + # prompt_token_count: 264 + # candidates_token_count: 81 + # total_token_count: 345 + # [END tokens_multimodal_image_inline_return] + def test_tokens_multimodal_image_file_api(self): # [START tokens_multimodal_image_file_api] model = genai.GenerativeModel("models/gemini-1.5-flash") - organ_upload = genai.upload_file(media / "organ.jpg") - print(model.count_tokens(["Tell me about this instrument", organ_upload])) + + prompt = "Tell me about this image" + your_image_file = genai.upload_file(path="image.jpg") + + # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). + # An image's display size does not affect its token count. + # Optionally, you can call `count_tokens` for the prompt and file separately. + print(model.count_tokens([prompt, your_image_file])) + + response = model.generate_content([prompt, your_image_file]) + response.text + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). 
+ print(response.usage_metadata) # [END tokens_multimodal_image_file_api] + # [START tokens_multimodal_image_file_api_return] + # total_tokens: 263 + # + # prompt_token_count: 264 + # candidates_token_count: 80 + # total_token_count: 344 + # [END tokens_multimodal_image_file_api_return] + def test_tokens_multimodal_video_audio_file_api(self): # [START tokens_multimodal_video_audio_file_api] + import time + model = genai.GenerativeModel("models/gemini-1.5-flash") - audio_upload = genai.upload_file(media / "sample.mp3") - print(model.count_tokens(audio_upload)) + + prompt = "Tell me about this video" + your_file = genai.upload_file(path=media / "Big_Buck_Bunny.mp4") + + # Videos need to be processed before you can use them. + while your_file.state.name == "PROCESSING": + print("processing video...") + time.sleep(5) + your_file = genai.get_file(your_file.name) + + # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). + # A video or audio file is converted to tokens at a fixed rate of tokens per second. + # Optionally, you can call `count_tokens` for the prompt and file separately. + print(model.count_tokens([prompt, your_file])) + + response = model.generate_content([prompt, your_file]) + + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). + print(response.usage_metadata) + # [END tokens_multimodal_video_audio_file_api] + # [START tokens_multimodal_video_audio_file_api_return] + # processing video... 
+ # total_tokens: 300 + # + # prompt_token_count: 301 + # candidates_token_count: 60 + # total_token_count: 361 + # [END tokens_multimodal_video_audio_file_api_return] + def test_tokens_cached_content(self): # [START tokens_cached_content] - document = genai.upload_file(path=media / "a11.txt") - model_name = "gemini-1.5-flash-001" + import time + + model = genai.GenerativeModel("models/gemini-1.5-flash") + + your_file = genai.upload_file(path=media / "a11.txt") + cache = genai.caching.CachedContent.create( - model=model_name, - contents=[document], + model="models/gemini-1.5-flash-001", + # You could set the system_instruction and tools + system_instruction=None, + tools=None, + contents=["Here the Apollo 11 transcript:", your_file], ) - print(genai.GenerativeModel().count_tokens(cache)) + + model = genai.GenerativeModel.from_cached_content(cache) + + # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). + # A video or audio file is converted to tokens at a fixed rate of tokens per second. + # Optionally, you can call `count_tokens` for the prompt and file separately. + prompt = "Please give a short summary of this file." + print(model.count_tokens(prompt)) + + response = model.generate_content(prompt) + # Use `usage_metadata` to get both input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively). 
+ print(response.usage_metadata) + + cache.delete() # [END tokens_cached_content] - cache.delete() # Clear + + # [START tokens_cached_content_return] + # total_tokens: 9 + # + # prompt_token_count: 323393 + # cached_content_token_count: 323383 + # candidates_token_count: 64 + # total_token_count: 323457 + # [END tokens_cached_content_return] def test_tokens_system_instruction(self): # [START tokens_system_instruction] - document = genai.upload_file(path=media / "a11.txt") + model = genai.GenerativeModel(model_name="gemini-1.5-flash") + + # The total token count includes everything sent to the generate_content request. + print(model.count_tokens("The quick brown fox jumps over the lazy dog.")) + # total_tokens: 10 + model = genai.GenerativeModel( - "models/gemini-1.5-flash-001", - system_instruction="You are an expert analyzing transcripts. Give a summary of this document.", + model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko." ) - print(model.count_tokens(document)) + + # The total token count includes everything sent to the generate_content request. + # When you use system instructions, the total token count increases. + print(model.count_tokens("The quick brown fox jumps over the lazy dog.")) # [END tokens_system_instruction] + # [START tokens_system_instruction_return] + # total_tokens: 10 + # + # total_tokens: 21 + # [END tokens_system_instruction_return] + def test_tokens_tools(self): # [START tokens_tools] + model = genai.GenerativeModel(model_name="gemini-1.5-flash") + + # The total token count includes everything sent to the generate_content request. + print( + model.count_tokens( + "I have 57 cats, each owns 44 mittens, how many mittens is that in total?" 
+ ) + ) + # total_tokens: 10 + def add(a: float, b: float): """returns a + b.""" return a + b @@ -117,6 +289,12 @@ def divide(a: float, b: float): ) # [END tokens_tools] + # [START tokens_tools_return] + # total_tokens: 22 + # + # total_tokens: 206 + # [END tokens_tools_return] + if __name__ == "__main__": absltest.main() From cc2a3b720bdc97676fe4db762cd81eab2aeed743 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 9 Jul 2024 14:42:38 -0700 Subject: [PATCH 03/90] Fix PIL.Image imports. (#447) --- samples/text_generation.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/samples/text_generation.py b/samples/text_generation.py index a5e800c75..c4d6adccb 100644 --- a/samples/text_generation.py +++ b/samples/text_generation.py @@ -12,7 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import PIL.Image from absl.testing import absltest import google.generativeai as genai @@ -40,7 +39,7 @@ def test_text_gen_text_only_prompt_streaming(self): def test_text_gen_multimodal_one_image_prompt(self): # [START text_gen_multimodal_one_image_prompt] - import PIL + import PIL.Image model = genai.GenerativeModel("gemini-1.5-flash") organ = PIL.Image.open(media / "organ.jpg") @@ -50,7 +49,7 @@ def test_text_gen_multimodal_one_image_prompt(self): def test_text_gen_multimodal_one_image_prompt_streaming(self): # [START text_gen_multimodal_one_image_prompt_streaming] - import PIL + import PIL.Image model = genai.GenerativeModel("gemini-1.5-flash") organ = PIL.Image.open(media / "organ.jpg") @@ -62,7 +61,7 @@ def test_text_gen_multimodal_one_image_prompt_streaming(self): def test_text_gen_multimodal_multi_image_prompt(self): # [START text_gen_multimodal_multi_image_prompt] - import PIL + import PIL.Image model = genai.GenerativeModel("gemini-1.5-flash") organ = PIL.Image.open(media / "organ.jpg") @@ -75,7 +74,7 @@ def 
test_text_gen_multimodal_multi_image_prompt(self): def test_text_gen_multimodal_multi_image_prompt_streaming(self): # [START text_gen_multimodal_multi_image_prompt_streaming] - import PIL + import PIL.Image model = genai.GenerativeModel("gemini-1.5-flash") organ = PIL.Image.open(media / "organ.jpg") From 0d51b2619889cfb2dad08b90d57c89c3f0a8244f Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Wed, 10 Jul 2024 13:34:01 -0700 Subject: [PATCH 04/90] Add code execution python sample (#451) * Add code execution python sample * Sync with docs. * format Change-Id: Id75a4c1936a13a63f1e22f36d5b7011c24e31233 --------- Co-authored-by: Mark Daoust --- samples/code_execution.py | 166 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 samples/code_execution.py diff --git a/samples/code_execution.py b/samples/code_execution.py new file mode 100644 index 000000000..cd82d676d --- /dev/null +++ b/samples/code_execution.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from absl.testing import absltest + +import google.generativeai as genai + + +class UnitTests(absltest.TestCase): + def test_code_execution_basic(self): + # [START code_execution_basic] + model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution") + response = model.generate_content( + ( + "What is the sum of the first 50 prime numbers? 
" + "Generate and run code for the calculation, and make sure you get all 50." + ) + ) + print(response.text) + # [END code_execution_basic] + # [START code_execution_basic_return] + # ``` python + # def is_prime(n): + # """ + # Checks if a number is prime. + # """ + # if n <= 1: + # return False + # for i in range(2, int(n**0.5) + 1): + # if n % i == 0: + # return False + # return True + # + # primes = [] + # num = 2 + # count = 0 + # while count < 50: + # if is_prime(num): + # primes.append(num) + # count += 1 + # num += 1 + # + # print(f'The first 50 prime numbers are: {primes}') + # print(f'The sum of the first 50 prime numbers is: {sum(primes)}') + # + # ``` + # ``` + # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229] + # The sum of the first 50 prime numbers is: 5117 + # + # ``` + # The code generated a list of the first 50 prime numbers, then sums the list to find the answer. + # + # The sum of the first 50 prime numbers is **5117**. + # [END code_execution_basic_return] + + def test_code_execution_request_override(self): + # [START code_execution_request_override] + model = genai.GenerativeModel(model_name="gemini-1.5-pro") + response = model.generate_content( + ( + "What is the sum of the first 50 prime numbers? " + "Generate and run code for the calculation, and make sure you get all 50." + ), + tools="code_execution", + ) + print(response.text) + # [END code_execution_request_override] + # [START code_execution_request_override_return] + # ``` python + # def is_prime(n): + # """ + # Checks if a number is prime. 
+ # """ + # if n <= 1: + # return False + # for i in range(2, int(n**0.5) + 1): + # if n % i == 0: + # return False + # return True + # + # primes = [] + # num = 2 + # count = 0 + # while count < 50: + # if is_prime(num): + # primes.append(num) + # count += 1 + # num += 1 + # + # print(f'The first 50 prime numbers are: {primes}') + # print(f'The sum of the first 50 prime numbers is: {sum(primes)}') + # + # ``` + # ``` + # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229] + # The sum of the first 50 prime numbers is: 5117 + # + # ``` + # The code generated a list of the first 50 prime numbers, then sums the list to find the answer. + # + # The sum of the first 50 prime numbers is **5117**. + # [END code_execution_request_override_return] + + def test_code_execution_chat(self): + # [START code_execution_chat] + model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution") + chat = model.start_chat() + response = chat.send_message( + ( + "What is the sum of the first 50 prime numbers? " + "Generate and run code for the calculation, and make sure you get all 50." + ) + ) + print(response.text) + # [END code_execution_chat] + # [START code_execution_chat_return] + # ``` python + # def is_prime(n): + # """ + # Checks if a number is prime. 
+ # """ + # if n <= 1: + # return False + # for i in range(2, int(n**0.5) + 1): + # if n % i == 0: + # return False + # return True + # + # primes = [] + # num = 2 + # count = 0 + # while count < 50: + # if is_prime(num): + # primes.append(num) + # count += 1 + # num += 1 + # + # print(f'The first 50 prime numbers are: {primes}') + # print(f'The sum of the first 50 prime numbers is: {sum(primes)}') + # + # ``` + # ``` + # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229] + # The sum of the first 50 prime numbers is: 5117 + # + # ``` + # The code generated a list of the first 50 prime numbers, then sums the list to find the answer. + # + # The sum of the first 50 prime numbers is **5117**. + # [END code_execution_chat_return] + + +if __name__ == "__main__": + absltest.main() From 712e0e4b167fe94c5fa6dc1cad2bb4975ce766d4 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 10 Jul 2024 14:48:40 -0700 Subject: [PATCH 05/90] Update example to show part types. (#452) --- samples/code_execution.py | 51 +++++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/samples/code_execution.py b/samples/code_execution.py index cd82d676d..17f8142a0 100644 --- a/samples/code_execution.py +++ b/samples/code_execution.py @@ -27,9 +27,38 @@ def test_code_execution_basic(self): "Generate and run code for the calculation, and make sure you get all 50." ) ) - print(response.text) + + # Each `part` either contains `text`, `executable_code` or an `execution_result` + for part in result.candidates[0].content.parts: + print(part, '\n') + + print('-'*80) + # The `.text` accessor joins the parts into a markdown compatible text representation. 
+ print('\n\n', response.text) # [END code_execution_basic] + # [START code_execution_basic_return] + # text: "I can help with that! To calculate the sum of the first 50 prime numbers, we\'ll need to first identify all the prime numbers up to the 50th prime number. \n\nHere is the code to find and sum the first 50 prime numbers:\n\n" + # + # executable_code { + # language: PYTHON + # code: "\ndef is_prime(n):\n \"\"\"\n Checks if a number is prime.\n \"\"\"\n if n <= 1:\n return False\n for i in range(2, int(n**0.5) + 1):\n if n % i == 0:\n return False\n return True\n\nprime_count = 0\nnumber = 2\nprimes = []\nwhile prime_count < 50:\n if is_prime(number):\n primes.append(number)\n prime_count += 1\n number += 1\n\nprint(f\'The sum of the first 50 prime numbers is: {sum(primes)}\')\n" + # } + # + # code_execution_result { + # outcome: OUTCOME_OK + # output: "The sum of the first 50 prime numbers is: 5117\n" + # } + # + # text: "I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. \n" + # + # + # -------------------------------------------------------------------------------- + # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number. 
+ # + # Here is the code to find and sum the first 50 prime numbers: + # + # # ``` python # def is_prime(n): # """ @@ -42,27 +71,23 @@ def test_code_execution_basic(self): # return False # return True # + # prime_count = 0 + # number = 2 # primes = [] - # num = 2 - # count = 0 - # while count < 50: - # if is_prime(num): - # primes.append(num) - # count += 1 - # num += 1 + # while prime_count < 50: + # if is_prime(number): + # primes.append(number) + # prime_count += 1 + # number += 1 # - # print(f'The first 50 prime numbers are: {primes}') # print(f'The sum of the first 50 prime numbers is: {sum(primes)}') # # ``` # ``` - # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229] # The sum of the first 50 prime numbers is: 5117 # # ``` - # The code generated a list of the first 50 prime numbers, then sums the list to find the answer. - # - # The sum of the first 50 prime numbers is **5117**. + # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. 
# [END code_execution_basic_return] def test_code_execution_request_override(self): From 351daadb83455eef375e86a4ec742c948a194dae Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 11 Jul 2024 09:27:48 -0700 Subject: [PATCH 06/90] move model_configuration samples (#454) Change-Id: I4c0f02a52e9c63d5ee72874f24904c1c931cb4cb --- samples/{model_configuration.py => configure_model_parameters.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename samples/{model_configuration.py => configure_model_parameters.py} (100%) diff --git a/samples/model_configuration.py b/samples/configure_model_parameters.py similarity index 100% rename from samples/model_configuration.py rename to samples/configure_model_parameters.py From d74189f4ce04757662586895dedca775e7aefecb Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 11 Jul 2024 09:36:30 -0700 Subject: [PATCH 07/90] Update configure_model_parameters.py --- samples/configure_model_parameters.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/configure_model_parameters.py b/samples/configure_model_parameters.py index 54aec9763..ecd39d312 100644 --- a/samples/configure_model_parameters.py +++ b/samples/configure_model_parameters.py @@ -19,7 +19,7 @@ class UnitTests(absltest.TestCase): def test_configure_model(self): - # [START configure_model] + # [START configure_model_parameters] model = genai.GenerativeModel("gemini-1.5-flash") response = model.generate_content( "Tell me a story about a magic backpack.", @@ -33,7 +33,7 @@ def test_configure_model(self): ) print(response.text) - # [END configure_model] + # [END configure_model_parameters] if __name__ == "__main__": From 8642c8c31009e80543803ff2142fb31c9f1ed838 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 11 Jul 2024 10:03:11 -0700 Subject: [PATCH 08/90] move model_configuration samples (#456) Change-Id: Ic60ecebc3cc9a2f4455054dcdb769e338331e8e1 --- ...model_configuration.sh => configure_model_parameters.sh} | 6 +++--- 1 file 
changed, 3 insertions(+), 3 deletions(-) rename samples/rest/{model_configuration.sh => configure_model_parameters.sh} (87%) diff --git a/samples/rest/model_configuration.sh b/samples/rest/configure_model_parameters.sh similarity index 87% rename from samples/rest/model_configuration.sh rename to samples/rest/configure_model_parameters.sh index ba10ff813..bd8d9d4c6 100644 --- a/samples/rest/model_configuration.sh +++ b/samples/rest/configure_model_parameters.sh @@ -1,7 +1,7 @@ set -eu -echo "[START configure_model]" -# [START configure_model] +echo "[START configure_model_parameters]" +# [START configure_model_parameters] curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ @@ -27,4 +27,4 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:ge "topK": 10 } }' 2> /dev/null | grep "text" -# [END configure_model] +# [END configure_model_parameters] From 7c2148642e3c50f642b2041073172f11b221f532 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Thu, 11 Jul 2024 11:08:28 -0700 Subject: [PATCH 09/90] Format code execution (#457) --- samples/code_execution.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/samples/code_execution.py b/samples/code_execution.py index 17f8142a0..6b5c97dc4 100644 --- a/samples/code_execution.py +++ b/samples/code_execution.py @@ -27,14 +27,14 @@ def test_code_execution_basic(self): "Generate and run code for the calculation, and make sure you get all 50." ) ) - + # Each `part` either contains `text`, `executable_code` or an `execution_result` for part in result.candidates[0].content.parts: - print(part, '\n') + print(part, "\n") - print('-'*80) - # The `.text` accessor joins the parts into a markdown compatible text representation. - print('\n\n', response.text) + print("-" * 80) + # The `.text` accessor joins the parts into a markdown compatible text representation. 
+ print("\n\n", response.text) # [END code_execution_basic] # [START code_execution_basic_return] @@ -54,7 +54,7 @@ def test_code_execution_basic(self): # # # -------------------------------------------------------------------------------- - # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number. + # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number. # # Here is the code to find and sum the first 50 prime numbers: # @@ -87,7 +87,7 @@ def test_code_execution_basic(self): # The sum of the first 50 prime numbers is: 5117 # # ``` - # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. + # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. # [END code_execution_basic_return] def test_code_execution_request_override(self): From 950a666abd4f19c53b9a71a06a16b3af026fdc75 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Thu, 11 Jul 2024 11:20:08 -0700 Subject: [PATCH 10/90] Chat REST samples (#449) * Add first chat samples for rest * Add Chat rest examples * last message should be 'role:user' Change-Id: I3e06e9e0ffb553cfc70add5ed0365cb56e9fddff --------- Co-authored-by: Mark Daoust --- samples/rest/chat.sh | 93 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 samples/rest/chat.sh diff --git a/samples/rest/chat.sh b/samples/rest/chat.sh new file mode 100644 index 000000000..d5af4cfb5 --- /dev/null +++ b/samples/rest/chat.sh @@ -0,0 +1,93 @@ +set -eu + +SCRIPT_DIR=$(dirname "$0") +MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) + +echo "[START chat]" +# [START chat] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": 
[ + {"role":"user", + "parts":[{ + "text": "Hello"}]}, + {"role": "model", + "parts":[{ + "text": "Great to meet you. What would you like to know?"}]}, + {"role":"user", + "parts":[{ + "text": "I have two dogs in my house. How many paws are in my house?"}]}, + ] + }' 2> /dev/null | grep "text" +# [END chat] + +echo "[START chat_streaming]" +# [START chat_streaming] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [ + {"role":"user", + "parts":[{ + "text": "Hello"}]}, + {"role": "model", + "parts":[{ + "text": "Great to meet you. What would you like to know?"}]}, + {"role":"user", + "parts":[{ + "text": "I have two dogs in my house. How many paws are in my house?"}]}, + ] + }' 2> /dev/null | grep "text" +# [END chat_streaming] + +echo "[START chat_streaming_with_images]" +# [START chat_streaming_with_images] +IMG_PATH=${MEDIA_DIR}/organ.jpg + +if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then + B64FLAGS="--input" +else + B64FLAGS="-w0" +fi + +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [ + { + "role": "user", + "parts": [ + { + "text": "Hello, I am interested in learning about musical instruments. Can I show you one?" + } + ] + }, + { + "role": "model", + "parts": [ + { + "text": "Certainly." 
+ }, + ] + }, + { + "role": "user", + "parts": [ + { + "text": "Tell me about this instrument" + }, + { + "inline_data": { + "mime_type": "image/jpeg", + "data": "'$(base64 $B64FLAGS $IMG_PATH)'" + } + } + ] + } + ] + }' 2> /dev/null | grep "text" +# [END chat_streaming_with_images] \ No newline at end of file From 754d038d14cd1587c8143e41dfdde9741f0e7eba Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 11 Jul 2024 11:38:09 -0700 Subject: [PATCH 11/90] Update tuned_models.py (#458) --- samples/tuned_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/tuned_models.py b/samples/tuned_models.py index 29246347d..d328d8c30 100644 --- a/samples/tuned_models.py +++ b/samples/tuned_models.py @@ -77,7 +77,7 @@ def test_tuned_models_generate_content(self): model = genai.GenerativeModel(model_name="tunedModels/my-increment-model") result = model.generate_content("III") print(result.text) # "IV" - # [END tuned_models_create] + # [END tuned_models_generate_content] def test_tuned_models_get(self): # [START tuned_models_get] From 4e1dcd6f45f90937ef921be1e2cf1b316f4f0bf7 Mon Sep 17 00:00:00 2001 From: rachelsaunders <52258509+rachelsaunders@users.noreply.github.com> Date: Fri, 12 Jul 2024 16:44:11 +0200 Subject: [PATCH 12/90] Update count_tokens.py (#459) - integrated returns into main snippet - updated code comments - pulled text of prompts out of the requests to generate_content --- samples/count_tokens.py | 188 ++++++++++++++++------------------------ 1 file changed, 77 insertions(+), 111 deletions(-) diff --git a/samples/count_tokens.py b/samples/count_tokens.py index 827fe5f1d..a45457e38 100644 --- a/samples/count_tokens.py +++ b/samples/count_tokens.py @@ -24,16 +24,14 @@ class UnitTests(absltest.TestCase): def test_tokens_context_window(self): # [START tokens_context_window] model_info = genai.get_model("models/gemini-1.0-pro-001") - # Returns the "context window" for the model (the combined input and output token limits) + + # Returns 
the "context window" for the model, + # which is the combined input and output token limits. print(f"{model_info.input_token_limit=}") print(f"{model_info.output_token_limit=}") + # ( input_token_limit=30720, output_token_limit=2048 ) # [END tokens_context_window] - # [START tokens_context_window_return] - # input_token_limit=30720 - # output_token_limit=2048 - # [END tokens_context_window_return] - def test_tokens_text_only(self): # [START tokens_text_only] model = genai.GenerativeModel("models/gemini-1.5-flash") @@ -42,22 +40,18 @@ def test_tokens_text_only(self): # Call `count_tokens` to get the input token count (`total_tokens`). print("total_tokens: ", model.count_tokens(prompt)) + # ( total_tokens: 10 ) response = model.generate_content(prompt) - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). + # On the response for `generate_content`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the combined token count (`total_token_count`). print(response.usage_metadata) + # ( prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 ) # [END tokens_text_only] - # [START tokens_text_only_return] - # total_tokens: total_tokens: 10 - # - # prompt_token_count: 11 - # candidates_token_count: 73 - # total_token_count: 84 - # [END tokens_text_only_return] - def test_tokens_chat(self): # [START tokens_chat] model = genai.GenerativeModel("models/gemini-1.5-flash") @@ -70,30 +64,26 @@ def test_tokens_chat(self): ) # Call `count_tokens` to get the input token count (`total_tokens`). print(model.count_tokens(chat.history)) + # ( total_tokens: 10 ) response = chat.send_message( "In one sentence, explain how a computer works to a young child." 
) - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). + + # On the response for `send_message`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the combined token count (`total_token_count`). print(response.usage_metadata) + # ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 ) - # TODO add comment... from google.generativeai.types.content_types import to_contents + # You can call `count_tokens` on the combined history and content of the next turn. print(model.count_tokens(chat.history + to_contents("What is the meaning of life?"))) + # ( total_tokens: 56 ) # [END tokens_chat] - # [START tokens_chat_return] - # total_tokens: 10 - # - # prompt_token_count: 25 - # candidates_token_count: 21 - # total_token_count: 46 - # - # total_tokens: 56 - # [END tokens_chat_return] - def test_tokens_multimodal_image_inline(self): # [START tokens_multimodal_image_inline] import PIL.Image @@ -103,25 +93,23 @@ def test_tokens_multimodal_image_inline(self): prompt = "Tell me about this image" your_image_file = PIL.Image.open("image.jpg") - # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). - # An image's display size does not affect its token count. - # Optionally, you can call `count_tokens` for the prompt and file separately. + # Call `count_tokens` to get the input token count + # of the combined text and file (`total_tokens`). + # An image's display or file size does not affect its token count. + # Optionally, you can call `count_tokens` for the text and file separately. 
print(model.count_tokens([prompt, your_image_file])) + # ( total_tokens: 263 ) response = model.generate_content([prompt, your_image_file]) - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). + + # On the response for `generate_content`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the combined token count (`total_token_count`). print(response.usage_metadata) + # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 ) # [END tokens_multimodal_image_inline] - # [START tokens_multimodal_image_inline_return] - # total_tokens: 263 - # - # prompt_token_count: 264 - # candidates_token_count: 81 - # total_token_count: 345 - # [END tokens_multimodal_image_inline_return] - def test_tokens_multimodal_image_file_api(self): # [START tokens_multimodal_image_file_api] model = genai.GenerativeModel("models/gemini-1.5-flash") @@ -129,26 +117,23 @@ def test_tokens_multimodal_image_file_api(self): prompt = "Tell me about this image" your_image_file = genai.upload_file(path="image.jpg") - # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). - # An image's display size does not affect its token count. - # Optionally, you can call `count_tokens` for the prompt and file separately. + # Call `count_tokens` to get the input token count + # of the combined text and file (`total_tokens`). + # An image's display or file size does not affect its token count. + # Optionally, you can call `count_tokens` for the text and file separately. print(model.count_tokens([prompt, your_image_file])) + # ( total_tokens: 263 ) response = model.generate_content([prompt, your_image_file]) response.text - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). 
+ # On the response for `generate_content`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the combined token count (`total_token_count`). print(response.usage_metadata) + # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 ) # [END tokens_multimodal_image_file_api] - # [START tokens_multimodal_image_file_api_return] - # total_tokens: 263 - # - # prompt_token_count: 264 - # candidates_token_count: 80 - # total_token_count: 344 - # [END tokens_multimodal_image_file_api_return] - def test_tokens_multimodal_video_audio_file_api(self): # [START tokens_multimodal_video_audio_file_api] import time @@ -164,28 +149,24 @@ def test_tokens_multimodal_video_audio_file_api(self): time.sleep(5) your_file = genai.get_file(your_file.name) - # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). + # Call `count_tokens` to get the input token count + # of the combined text and video/audio file (`total_tokens`). # A video or audio file is converted to tokens at a fixed rate of tokens per second. - # Optionally, you can call `count_tokens` for the prompt and file separately. + # Optionally, you can call `count_tokens` for the text and file separately. print(model.count_tokens([prompt, your_file])) + # ( total_tokens: 300 ) response = model.generate_content([prompt, your_file]) - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). + # On the response for `generate_content`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the combined token count (`total_token_count`). 
print(response.usage_metadata) + # ( prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 ) # [END tokens_multimodal_video_audio_file_api] - # [START tokens_multimodal_video_audio_file_api_return] - # processing video... - # total_tokens: 300 - # - # prompt_token_count: 301 - # candidates_token_count: 60 - # total_token_count: 361 - # [END tokens_multimodal_video_audio_file_api_return] - def test_tokens_cached_content(self): # [START tokens_cached_content] import time @@ -196,7 +177,7 @@ def test_tokens_cached_content(self): cache = genai.caching.CachedContent.create( model="models/gemini-1.5-flash-001", - # You could set the system_instruction and tools + # You can set the system_instruction and tools system_instruction=None, tools=None, contents=["Here the Apollo 11 transcript:", your_file], @@ -204,63 +185,55 @@ def test_tokens_cached_content(self): model = genai.GenerativeModel.from_cached_content(cache) - # Call `count_tokens` to get input token count of the combined text and file (`total_tokens`). - # A video or audio file is converted to tokens at a fixed rate of tokens per second. - # Optionally, you can call `count_tokens` for the prompt and file separately. prompt = "Please give a short summary of this file." + + # Call `count_tokens` to get input token count + # of the combined text and file (`total_tokens`). + # A video or audio file is converted to tokens at a fixed rate of tokens per second. + # Optionally, you can call `count_tokens` for the text and file separately. print(model.count_tokens(prompt)) + # ( total_tokens: 9 ) response = model.generate_content(prompt) - # Use `usage_metadata` to get both input and output token counts - # (`prompt_token_count` and `candidates_token_count`, respectively). 
+ + # On the response for `generate_content`, use `usage_metadata` + # to get separate input and output token counts + # (`prompt_token_count` and `candidates_token_count`, respectively), + # as well as the cached content token count and the combined total token count. print(response.usage_metadata) + # ( prompt_token_count: 323393, cached_content_token_count: 323383, candidates_token_count: 64) + # ( total_token_count: 323457 ) cache.delete() # [END tokens_cached_content] - # [START tokens_cached_content_return] - # total_tokens: 9 - # - # prompt_token_count: 323393 - # cached_content_token_count: 323383 - # candidates_token_count: 64 - # total_token_count: 323457 - # [END tokens_cached_content_return] - def test_tokens_system_instruction(self): # [START tokens_system_instruction] model = genai.GenerativeModel(model_name="gemini-1.5-flash") - # The total token count includes everything sent to the generate_content request. - print(model.count_tokens("The quick brown fox jumps over the lazy dog.")) + prompt="The quick brown fox jumps over the lazy dog." + + print(model.count_tokens(prompt)) # total_tokens: 10 model = genai.GenerativeModel( model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko." ) - # The total token count includes everything sent to the generate_content request. + # The total token count includes everything sent to the `generate_content` request. # When you use system instructions, the total token count increases. - print(model.count_tokens("The quick brown fox jumps over the lazy dog.")) + print(model.count_tokens(prompt)) + # ( total_tokens: 21 ) # [END tokens_system_instruction] - # [START tokens_system_instruction_return] - # total_tokens: 10 - # - # total_tokens: 21 - # [END tokens_system_instruction_return] - def test_tokens_tools(self): # [START tokens_tools] model = genai.GenerativeModel(model_name="gemini-1.5-flash") - # The total token count includes everything sent to the generate_content request. 
- print( - model.count_tokens( - "I have 57 cats, each owns 44 mittens, how many mittens is that in total?" - ) - ) - # total_tokens: 10 + prompt="I have 57 cats, each owns 44 mittens, how many mittens is that in total?" + + print(model.count_tokens(prompt)) + # ( total_tokens: 22 ) def add(a: float, b: float): """returns a + b.""" @@ -282,19 +255,12 @@ def divide(a: float, b: float): "models/gemini-1.5-flash-001", tools=[add, subtract, multiply, divide] ) - print( - model.count_tokens( - "I have 57 cats, each owns 44 mittens, how many mittens is that in total?" - ) - ) + # The total token count includes everything sent to the `generate_content` request. + # When you use tools (like function calling), the total token count increases. + print(model.count_tokens(prompt)) + # ( total_tokens: 206 ) # [END tokens_tools] - # [START tokens_tools_return] - # total_tokens: 22 - # - # total_tokens: 206 - # [END tokens_tools_return] - if __name__ == "__main__": absltest.main() From 8494231be1d9220e0ce8cda82d4f9a3209afb375 Mon Sep 17 00:00:00 2001 From: Guillaume Vernade Date: Wed, 17 Jul 2024 18:24:27 +0200 Subject: [PATCH 13/90] Formatting (using black) (#460) --- samples/count_tokens.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/count_tokens.py b/samples/count_tokens.py index a45457e38..beae3b288 100644 --- a/samples/count_tokens.py +++ b/samples/count_tokens.py @@ -69,7 +69,7 @@ def test_tokens_chat(self): response = chat.send_message( "In one sentence, explain how a computer works to a young child." 
) - + # On the response for `send_message`, use `usage_metadata` # to get separate input and output token counts # (`prompt_token_count` and `candidates_token_count`, respectively), @@ -195,7 +195,7 @@ def test_tokens_cached_content(self): # ( total_tokens: 9 ) response = model.generate_content(prompt) - + # On the response for `generate_content`, use `usage_metadata` # to get separate input and output token counts # (`prompt_token_count` and `candidates_token_count`, respectively), @@ -211,7 +211,7 @@ def test_tokens_system_instruction(self): # [START tokens_system_instruction] model = genai.GenerativeModel(model_name="gemini-1.5-flash") - prompt="The quick brown fox jumps over the lazy dog." + prompt = "The quick brown fox jumps over the lazy dog." print(model.count_tokens(prompt)) # total_tokens: 10 @@ -230,7 +230,7 @@ def test_tokens_tools(self): # [START tokens_tools] model = genai.GenerativeModel(model_name="gemini-1.5-flash") - prompt="I have 57 cats, each owns 44 mittens, how many mittens is that in total?" + prompt = "I have 57 cats, each owns 44 mittens, how many mittens is that in total?" 
print(model.count_tokens(prompt)) # ( total_tokens: 22 ) From 3491bfc3b91f1b374193aa3748cb31d5c78db554 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Wed, 17 Jul 2024 09:35:15 -0700 Subject: [PATCH 14/90] Adding count_tokens for rest (#444) * Adding count_tokens for rest * Update to have same prompt as python example * tests now working --- samples/rest/count_tokens.sh | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 samples/rest/count_tokens.sh diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh new file mode 100644 index 000000000..867e787b8 --- /dev/null +++ b/samples/rest/count_tokens.sh @@ -0,0 +1,32 @@ +set -eu + +echo "[START tokens_text_only]" +# [START tokens_text_only] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[{ + "text": "The quick brown fox jumps over the lazy dog." 
+ }], + }], + }' +# [END tokens_text_only] + +echo "[START tokens_chat]" +# [START tokens_chat] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [ + {"role": "user", + "parts": [{"text": "Hi, my name is Bob."}], + }, + {"role": "model", + "parts":[{"text": "Hi Bob"}], + }, + ], + }' +# [END tokens_chat] \ No newline at end of file From e8ad6533b8d3e2cf2308fd092114a91951708240 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Wed, 17 Jul 2024 09:42:13 -0700 Subject: [PATCH 15/90] add safety settings examples for curl (#433) * add safety settings examples for curl * replace integers with category name --- samples/rest/safety_settings.sh | 38 +++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 samples/rest/safety_settings.sh diff --git a/samples/rest/safety_settings.sh b/samples/rest/safety_settings.sh new file mode 100644 index 000000000..f7eb45186 --- /dev/null +++ b/samples/rest/safety_settings.sh @@ -0,0 +1,38 @@ +set -eu + +echo "[START safety_settings]" +# [START safety_settings] +echo '{ + "safetySettings": [ + {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH} + ], + "contents": [{ + "parts":[{ + "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! 
Write a ironic phrase about them.'"}]}]}' > request.json + + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d @request.json 2> /dev/null > tee response.json + + jq .promptFeedback > response.json +# [END safety_settings] + +echo "[START safety_settings_multi]" +# [START safety_settings_multi] +echo '{ + "safetySettings": [ + {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH}, + {'category': HARM_CATEGORY_HATE_SPEECH, 'threshold': BLOCK_MEDIUM_AND_ABOVE} + ], + "contents": [{ + "parts":[{ + "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json + + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d @request.json 2> /dev/null > response.json + + jq .promptFeedback > response.json +# [END safety_settings_multi] From c5ef6c046dece66e723379c4ada41c04622ce374 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 17 Jul 2024 16:35:30 -0700 Subject: [PATCH 16/90] Add markdown docs (#462) Change-Id: I63ffaa1c0d4af92f4a630ea21c99f927095c1d34 --- .gitignore | 1 - docs/api/google/generativeai.md | 138 + docs/api/google/generativeai/ChatSession.md | 222 + .../google/generativeai/GenerativeModel.md | 429 + docs/api/google/generativeai/_api_cache.json | 9886 +++++++++++++++++ docs/api/google/generativeai/_redirects.yaml | 13 + docs/api/google/generativeai/_toc.yaml | 507 + docs/api/google/generativeai/all_symbols.md | 261 + docs/api/google/generativeai/api_report.pb | Bin 0 -> 49595 bytes docs/api/google/generativeai/chat.md | 198 + docs/api/google/generativeai/chat_async.md | 198 + docs/api/google/generativeai/configure.md | 80 + .../generativeai/count_message_tokens.md | 41 + .../google/generativeai/count_text_tokens.md | 37 + 
.../google/generativeai/create_tuned_model.md | 198 + docs/api/google/generativeai/delete_file.md | 34 + .../google/generativeai/delete_tuned_model.md | 36 + docs/api/google/generativeai/embed_content.md | 112 + .../generativeai/embed_content_async.md | 40 + .../generativeai/generate_embeddings.md | 90 + docs/api/google/generativeai/generate_text.md | 172 + .../api/google/generativeai/get_base_model.md | 87 + docs/api/google/generativeai/get_file.md | 34 + docs/api/google/generativeai/get_model.md | 87 + docs/api/google/generativeai/get_operation.md | 34 + .../google/generativeai/get_tuned_model.md | 87 + docs/api/google/generativeai/list_files.md | 34 + docs/api/google/generativeai/list_models.md | 87 + .../google/generativeai/list_operations.md | 34 + .../google/generativeai/list_tuned_models.md | 87 + docs/api/google/generativeai/protos.md | 368 + .../protos/AttributionSourceId.md | 73 + .../AttributionSourceId/GroundingPassageId.md | 59 + .../SemanticRetrieverChunk.md | 60 + .../protos/BatchCreateChunksRequest.md | 62 + .../protos/BatchCreateChunksResponse.md | 48 + .../protos/BatchDeleteChunksRequest.md | 61 + .../protos/BatchEmbedContentsRequest.md | 65 + .../protos/BatchEmbedContentsResponse.md | 50 + .../protos/BatchEmbedTextRequest.md | 71 + .../protos/BatchEmbedTextResponse.md | 49 + .../protos/BatchUpdateChunksRequest.md | 62 + .../protos/BatchUpdateChunksResponse.md | 48 + docs/api/google/generativeai/protos/Blob.md | 64 + .../generativeai/protos/CachedContent.md | 181 + .../protos/CachedContent/UsageMetadata.md | 49 + .../google/generativeai/protos/Candidate.md | 123 + .../protos/Candidate/FinishReason.md | 733 ++ docs/api/google/generativeai/protos/Chunk.md | 108 + .../google/generativeai/protos/Chunk/State.md | 696 ++ .../google/generativeai/protos/ChunkData.md | 51 + .../generativeai/protos/CitationMetadata.md | 48 + .../generativeai/protos/CitationSource.md | 88 + .../generativeai/protos/CodeExecution.md | 29 + .../protos/CodeExecutionResult.md | 
65 + .../protos/CodeExecutionResult/Outcome.md | 699 ++ .../google/generativeai/protos/Condition.md | 80 + .../generativeai/protos/Condition/Operator.md | 782 ++ .../api/google/generativeai/protos/Content.md | 64 + .../generativeai/protos/ContentEmbedding.md | 48 + .../generativeai/protos/ContentFilter.md | 67 + docs/api/google/generativeai/protos/Corpus.md | 87 + .../protos/CountMessageTokensRequest.md | 66 + .../protos/CountMessageTokensResponse.md | 52 + .../protos/CountTextTokensRequest.md | 66 + .../protos/CountTextTokensResponse.md | 52 + .../generativeai/protos/CountTokensRequest.md | 77 + .../protos/CountTokensResponse.md | 64 + .../protos/CreateCachedContentRequest.md | 48 + .../generativeai/protos/CreateChunkRequest.md | 59 + .../protos/CreateCorpusRequest.md | 48 + .../protos/CreateDocumentRequest.md | 58 + .../generativeai/protos/CreateFileRequest.md | 48 + .../generativeai/protos/CreateFileResponse.md | 48 + .../protos/CreatePermissionRequest.md | 58 + .../protos/CreateTunedModelMetadata.md | 86 + .../protos/CreateTunedModelRequest.md | 62 + .../generativeai/protos/CustomMetadata.md | 87 + .../api/google/generativeai/protos/Dataset.md | 50 + .../protos/DeleteCachedContentRequest.md | 49 + .../generativeai/protos/DeleteChunkRequest.md | 50 + .../protos/DeleteCorpusRequest.md | 62 + .../protos/DeleteDocumentRequest.md | 62 + .../generativeai/protos/DeleteFileRequest.md | 49 + .../protos/DeletePermissionRequest.md | 50 + .../protos/DeleteTunedModelRequest.md | 49 + .../google/generativeai/protos/Document.md | 99 + .../protos/EmbedContentRequest.md | 103 + .../protos/EmbedContentResponse.md | 49 + .../generativeai/protos/EmbedTextRequest.md | 59 + .../generativeai/protos/EmbedTextResponse.md | 50 + .../google/generativeai/protos/Embedding.md | 48 + .../api/google/generativeai/protos/Example.md | 60 + .../generativeai/protos/ExecutableCode.md | 64 + .../protos/ExecutableCode/Language.md | 663 ++ docs/api/google/generativeai/protos/File.md | 164 + 
.../google/generativeai/protos/File/State.md | 698 ++ .../google/generativeai/protos/FileData.md | 58 + .../generativeai/protos/FunctionCall.md | 62 + .../protos/FunctionCallingConfig.md | 69 + .../protos/FunctionCallingConfig/Mode.md | 704 ++ .../protos/FunctionDeclaration.md | 80 + .../generativeai/protos/FunctionResponse.md | 61 + .../protos/GenerateAnswerRequest.md | 151 + .../GenerateAnswerRequest/AnswerStyle.md | 698 ++ .../protos/GenerateAnswerResponse.md | 104 + .../GenerateAnswerResponse/InputFeedback.md | 65 + .../InputFeedback/BlockReason.md | 680 ++ .../protos/GenerateContentRequest.md | 151 + .../protos/GenerateContentResponse.md | 86 + .../GenerateContentResponse/PromptFeedback.md | 64 + .../PromptFeedback/BlockReason.md | 680 ++ .../GenerateContentResponse/UsageMetadata.md | 80 + .../protos/GenerateMessageRequest.md | 124 + .../protos/GenerateMessageResponse.md | 75 + .../protos/GenerateTextRequest.md | 189 + .../protos/GenerateTextResponse.md | 78 + .../generativeai/protos/GenerationConfig.md | 172 + .../protos/GetCachedContentRequest.md | 49 + .../generativeai/protos/GetChunkRequest.md | 49 + .../generativeai/protos/GetCorpusRequest.md | 49 + .../generativeai/protos/GetDocumentRequest.md | 49 + .../generativeai/protos/GetFileRequest.md | 49 + .../generativeai/protos/GetModelRequest.md | 53 + .../protos/GetPermissionRequest.md | 52 + .../protos/GetTunedModelRequest.md | 50 + .../protos/GroundingAttribution.md | 59 + .../generativeai/protos/GroundingPassage.md | 58 + .../generativeai/protos/GroundingPassages.md | 48 + .../generativeai/protos/HarmCategory.md | 822 ++ .../generativeai/protos/Hyperparameters.md | 100 + .../protos/ListCachedContentsRequest.md | 68 + .../protos/ListCachedContentsResponse.md | 59 + .../generativeai/protos/ListChunksRequest.md | 80 + .../generativeai/protos/ListChunksResponse.md | 60 + .../generativeai/protos/ListCorporaRequest.md | 69 + .../protos/ListCorporaResponse.md | 60 + .../protos/ListDocumentsRequest.md | 79 + 
.../protos/ListDocumentsResponse.md | 60 + .../generativeai/protos/ListFilesRequest.md | 59 + .../generativeai/protos/ListFilesResponse.md | 58 + .../generativeai/protos/ListModelsRequest.md | 69 + .../generativeai/protos/ListModelsResponse.md | 60 + .../protos/ListPermissionsRequest.md | 80 + .../protos/ListPermissionsResponse.md | 60 + .../protos/ListTunedModelsRequest.md | 97 + .../protos/ListTunedModelsResponse.md | 60 + .../api/google/generativeai/protos/Message.md | 86 + .../generativeai/protos/MessagePrompt.md | 103 + .../generativeai/protos/MetadataFilter.md | 63 + docs/api/google/generativeai/protos/Model.md | 193 + docs/api/google/generativeai/protos/Part.md | 136 + .../google/generativeai/protos/Permission.md | 110 + .../protos/Permission/GranteeType.md | 698 ++ .../generativeai/protos/Permission/Role.md | 697 ++ .../generativeai/protos/QueryCorpusRequest.md | 109 + .../protos/QueryCorpusResponse.md | 48 + .../protos/QueryDocumentRequest.md | 109 + .../protos/QueryDocumentResponse.md | 48 + .../generativeai/protos/RelevantChunk.md | 57 + .../generativeai/protos/SafetyFeedback.md | 63 + .../generativeai/protos/SafetyRating.md | 77 + .../generativeai/protos/SafetySetting.md | 64 + docs/api/google/generativeai/protos/Schema.md | 132 + .../protos/Schema/PropertiesEntry.md | 89 + .../protos/SemanticRetrieverConfig.md | 92 + .../google/generativeai/protos/StringList.md | 48 + .../google/generativeai/protos/TaskType.md | 771 ++ .../generativeai/protos/TextCompletion.md | 74 + .../google/generativeai/protos/TextPrompt.md | 50 + docs/api/google/generativeai/protos/Tool.md | 73 + .../google/generativeai/protos/ToolConfig.md | 48 + .../protos/TransferOwnershipRequest.md | 61 + .../protos/TransferOwnershipResponse.md | 27 + .../google/generativeai/protos/TunedModel.md | 197 + .../generativeai/protos/TunedModelSource.md | 61 + .../generativeai/protos/TuningExample.md | 59 + .../generativeai/protos/TuningExamples.md | 50 + .../generativeai/protos/TuningSnapshot.md | 
77 + .../google/generativeai/protos/TuningTask.md | 89 + docs/api/google/generativeai/protos/Type.md | 746 ++ .../protos/UpdateCachedContentRequest.md | 57 + .../generativeai/protos/UpdateChunkRequest.md | 58 + .../protos/UpdateCorpusRequest.md | 58 + .../protos/UpdateDocumentRequest.md | 58 + .../protos/UpdatePermissionRequest.md | 62 + .../protos/UpdateTunedModelRequest.md | 57 + .../generativeai/protos/VideoMetadata.md | 48 + docs/api/google/generativeai/types.md | 182 + .../generativeai/types/AnyModelNameOptions.md | 27 + .../types/AsyncGenerateContentResponse.md | 152 + .../google/generativeai/types/AuthorError.md | 27 + .../types/BaseModelNameOptions.md | 25 + .../api/google/generativeai/types/BlobDict.md | 27 + .../api/google/generativeai/types/BlobType.md | 26 + .../types/BlockedPromptException.md | 27 + .../generativeai/types/BlockedReason.md | 687 ++ .../generativeai/types/BrokenResponseError.md | 27 + .../types/CallableFunctionDeclaration.md | 144 + .../google/generativeai/types/ChatResponse.md | 223 + .../types/CitationMetadataDict.md | 48 + .../generativeai/types/CitationSourceDict.md | 84 + .../google/generativeai/types/Completion.md | 97 + .../google/generativeai/types/ContentDict.md | 27 + .../generativeai/types/ContentFilterDict.md | 62 + .../google/generativeai/types/ContentType.md | 38 + .../google/generativeai/types/ContentsType.md | 40 + .../google/generativeai/types/ExampleDict.md | 27 + .../generativeai/types/ExampleOptions.md | 26 + .../generativeai/types/ExamplesOptions.md | 27 + docs/api/google/generativeai/types/File.md | 170 + .../google/generativeai/types/FileDataDict.md | 27 + .../google/generativeai/types/FileDataType.md | 26 + .../generativeai/types/FunctionDeclaration.md | 121 + .../types/FunctionDeclarationType.md | 26 + .../generativeai/types/FunctionLibrary.md | 80 + .../generativeai/types/FunctionLibraryType.md | 32 + .../types/GenerateContentResponse.md | 185 + .../generativeai/types/GenerationConfig.md | 255 + 
.../types/GenerationConfigDict.md | 27 + .../types/GenerationConfigType.md | 25 + .../generativeai/types/HarmBlockThreshold.md | 722 ++ .../google/generativeai/types/HarmCategory.md | 657 ++ .../generativeai/types/HarmProbability.md | 724 ++ .../types/IncompleteIterationError.md | 27 + .../google/generativeai/types/MessageDict.md | 27 + .../generativeai/types/MessageOptions.md | 25 + .../generativeai/types/MessagePromptDict.md | 27 + .../types/MessagePromptOptions.md | 27 + .../generativeai/types/MessagesOptions.md | 26 + docs/api/google/generativeai/types/Model.md | 205 + .../generativeai/types/ModelsIterable.md | 23 + .../api/google/generativeai/types/PartDict.md | 27 + .../api/google/generativeai/types/PartType.md | 35 + .../google/generativeai/types/Permission.md | 274 + .../google/generativeai/types/Permissions.md | 386 + .../generativeai/types/RequestOptions.md | 209 + .../generativeai/types/RequestOptionsType.md | 24 + .../google/generativeai/types/ResponseDict.md | 27 + .../generativeai/types/SafetyFeedbackDict.md | 63 + .../generativeai/types/SafetyRatingDict.md | 73 + .../generativeai/types/SafetySettingDict.md | 60 + docs/api/google/generativeai/types/Status.md | 55 + .../types/StopCandidateException.md | 27 + .../generativeai/types/StrictContentType.md | 24 + docs/api/google/generativeai/types/Tool.md | 107 + .../api/google/generativeai/types/ToolDict.md | 27 + .../google/generativeai/types/ToolsType.md | 31 + .../google/generativeai/types/TunedModel.md | 272 + .../types/TunedModelNameOptions.md | 25 + .../generativeai/types/TunedModelState.md | 703 ++ .../google/generativeai/types/TypedDict.md | 73 + .../types/get_default_file_client.md | 30 + .../google/generativeai/types/to_file_data.md | 32 + .../google/generativeai/update_tuned_model.md | 38 + docs/api/google/generativeai/upload_file.md | 105 + 256 files changed, 42898 insertions(+), 1 deletion(-) create mode 100644 docs/api/google/generativeai.md create mode 100644 
docs/api/google/generativeai/ChatSession.md create mode 100644 docs/api/google/generativeai/GenerativeModel.md create mode 100644 docs/api/google/generativeai/_api_cache.json create mode 100644 docs/api/google/generativeai/_redirects.yaml create mode 100644 docs/api/google/generativeai/_toc.yaml create mode 100644 docs/api/google/generativeai/all_symbols.md create mode 100644 docs/api/google/generativeai/api_report.pb create mode 100644 docs/api/google/generativeai/chat.md create mode 100644 docs/api/google/generativeai/chat_async.md create mode 100644 docs/api/google/generativeai/configure.md create mode 100644 docs/api/google/generativeai/count_message_tokens.md create mode 100644 docs/api/google/generativeai/count_text_tokens.md create mode 100644 docs/api/google/generativeai/create_tuned_model.md create mode 100644 docs/api/google/generativeai/delete_file.md create mode 100644 docs/api/google/generativeai/delete_tuned_model.md create mode 100644 docs/api/google/generativeai/embed_content.md create mode 100644 docs/api/google/generativeai/embed_content_async.md create mode 100644 docs/api/google/generativeai/generate_embeddings.md create mode 100644 docs/api/google/generativeai/generate_text.md create mode 100644 docs/api/google/generativeai/get_base_model.md create mode 100644 docs/api/google/generativeai/get_file.md create mode 100644 docs/api/google/generativeai/get_model.md create mode 100644 docs/api/google/generativeai/get_operation.md create mode 100644 docs/api/google/generativeai/get_tuned_model.md create mode 100644 docs/api/google/generativeai/list_files.md create mode 100644 docs/api/google/generativeai/list_models.md create mode 100644 docs/api/google/generativeai/list_operations.md create mode 100644 docs/api/google/generativeai/list_tuned_models.md create mode 100644 docs/api/google/generativeai/protos.md create mode 100644 docs/api/google/generativeai/protos/AttributionSourceId.md create mode 100644 
docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md create mode 100644 docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md create mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksRequest.md create mode 100644 docs/api/google/generativeai/protos/BatchCreateChunksResponse.md create mode 100644 docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md create mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md create mode 100644 docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md create mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextRequest.md create mode 100644 docs/api/google/generativeai/protos/BatchEmbedTextResponse.md create mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md create mode 100644 docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md create mode 100644 docs/api/google/generativeai/protos/Blob.md create mode 100644 docs/api/google/generativeai/protos/CachedContent.md create mode 100644 docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md create mode 100644 docs/api/google/generativeai/protos/Candidate.md create mode 100644 docs/api/google/generativeai/protos/Candidate/FinishReason.md create mode 100644 docs/api/google/generativeai/protos/Chunk.md create mode 100644 docs/api/google/generativeai/protos/Chunk/State.md create mode 100644 docs/api/google/generativeai/protos/ChunkData.md create mode 100644 docs/api/google/generativeai/protos/CitationMetadata.md create mode 100644 docs/api/google/generativeai/protos/CitationSource.md create mode 100644 docs/api/google/generativeai/protos/CodeExecution.md create mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult.md create mode 100644 docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md create mode 100644 docs/api/google/generativeai/protos/Condition.md create mode 100644 
docs/api/google/generativeai/protos/Condition/Operator.md create mode 100644 docs/api/google/generativeai/protos/Content.md create mode 100644 docs/api/google/generativeai/protos/ContentEmbedding.md create mode 100644 docs/api/google/generativeai/protos/ContentFilter.md create mode 100644 docs/api/google/generativeai/protos/Corpus.md create mode 100644 docs/api/google/generativeai/protos/CountMessageTokensRequest.md create mode 100644 docs/api/google/generativeai/protos/CountMessageTokensResponse.md create mode 100644 docs/api/google/generativeai/protos/CountTextTokensRequest.md create mode 100644 docs/api/google/generativeai/protos/CountTextTokensResponse.md create mode 100644 docs/api/google/generativeai/protos/CountTokensRequest.md create mode 100644 docs/api/google/generativeai/protos/CountTokensResponse.md create mode 100644 docs/api/google/generativeai/protos/CreateCachedContentRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateChunkRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateCorpusRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateDocumentRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateFileRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateFileResponse.md create mode 100644 docs/api/google/generativeai/protos/CreatePermissionRequest.md create mode 100644 docs/api/google/generativeai/protos/CreateTunedModelMetadata.md create mode 100644 docs/api/google/generativeai/protos/CreateTunedModelRequest.md create mode 100644 docs/api/google/generativeai/protos/CustomMetadata.md create mode 100644 docs/api/google/generativeai/protos/Dataset.md create mode 100644 docs/api/google/generativeai/protos/DeleteCachedContentRequest.md create mode 100644 docs/api/google/generativeai/protos/DeleteChunkRequest.md create mode 100644 docs/api/google/generativeai/protos/DeleteCorpusRequest.md create mode 100644 
docs/api/google/generativeai/protos/DeleteDocumentRequest.md create mode 100644 docs/api/google/generativeai/protos/DeleteFileRequest.md create mode 100644 docs/api/google/generativeai/protos/DeletePermissionRequest.md create mode 100644 docs/api/google/generativeai/protos/DeleteTunedModelRequest.md create mode 100644 docs/api/google/generativeai/protos/Document.md create mode 100644 docs/api/google/generativeai/protos/EmbedContentRequest.md create mode 100644 docs/api/google/generativeai/protos/EmbedContentResponse.md create mode 100644 docs/api/google/generativeai/protos/EmbedTextRequest.md create mode 100644 docs/api/google/generativeai/protos/EmbedTextResponse.md create mode 100644 docs/api/google/generativeai/protos/Embedding.md create mode 100644 docs/api/google/generativeai/protos/Example.md create mode 100644 docs/api/google/generativeai/protos/ExecutableCode.md create mode 100644 docs/api/google/generativeai/protos/ExecutableCode/Language.md create mode 100644 docs/api/google/generativeai/protos/File.md create mode 100644 docs/api/google/generativeai/protos/File/State.md create mode 100644 docs/api/google/generativeai/protos/FileData.md create mode 100644 docs/api/google/generativeai/protos/FunctionCall.md create mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig.md create mode 100644 docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md create mode 100644 docs/api/google/generativeai/protos/FunctionDeclaration.md create mode 100644 docs/api/google/generativeai/protos/FunctionResponse.md create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest.md create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse.md create mode 100644 docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md create mode 100644 
docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md create mode 100644 docs/api/google/generativeai/protos/GenerateContentRequest.md create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse.md create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md create mode 100644 docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md create mode 100644 docs/api/google/generativeai/protos/GenerateMessageRequest.md create mode 100644 docs/api/google/generativeai/protos/GenerateMessageResponse.md create mode 100644 docs/api/google/generativeai/protos/GenerateTextRequest.md create mode 100644 docs/api/google/generativeai/protos/GenerateTextResponse.md create mode 100644 docs/api/google/generativeai/protos/GenerationConfig.md create mode 100644 docs/api/google/generativeai/protos/GetCachedContentRequest.md create mode 100644 docs/api/google/generativeai/protos/GetChunkRequest.md create mode 100644 docs/api/google/generativeai/protos/GetCorpusRequest.md create mode 100644 docs/api/google/generativeai/protos/GetDocumentRequest.md create mode 100644 docs/api/google/generativeai/protos/GetFileRequest.md create mode 100644 docs/api/google/generativeai/protos/GetModelRequest.md create mode 100644 docs/api/google/generativeai/protos/GetPermissionRequest.md create mode 100644 docs/api/google/generativeai/protos/GetTunedModelRequest.md create mode 100644 docs/api/google/generativeai/protos/GroundingAttribution.md create mode 100644 docs/api/google/generativeai/protos/GroundingPassage.md create mode 100644 docs/api/google/generativeai/protos/GroundingPassages.md create mode 100644 docs/api/google/generativeai/protos/HarmCategory.md create mode 100644 docs/api/google/generativeai/protos/Hyperparameters.md create mode 100644 
docs/api/google/generativeai/protos/ListCachedContentsRequest.md create mode 100644 docs/api/google/generativeai/protos/ListCachedContentsResponse.md create mode 100644 docs/api/google/generativeai/protos/ListChunksRequest.md create mode 100644 docs/api/google/generativeai/protos/ListChunksResponse.md create mode 100644 docs/api/google/generativeai/protos/ListCorporaRequest.md create mode 100644 docs/api/google/generativeai/protos/ListCorporaResponse.md create mode 100644 docs/api/google/generativeai/protos/ListDocumentsRequest.md create mode 100644 docs/api/google/generativeai/protos/ListDocumentsResponse.md create mode 100644 docs/api/google/generativeai/protos/ListFilesRequest.md create mode 100644 docs/api/google/generativeai/protos/ListFilesResponse.md create mode 100644 docs/api/google/generativeai/protos/ListModelsRequest.md create mode 100644 docs/api/google/generativeai/protos/ListModelsResponse.md create mode 100644 docs/api/google/generativeai/protos/ListPermissionsRequest.md create mode 100644 docs/api/google/generativeai/protos/ListPermissionsResponse.md create mode 100644 docs/api/google/generativeai/protos/ListTunedModelsRequest.md create mode 100644 docs/api/google/generativeai/protos/ListTunedModelsResponse.md create mode 100644 docs/api/google/generativeai/protos/Message.md create mode 100644 docs/api/google/generativeai/protos/MessagePrompt.md create mode 100644 docs/api/google/generativeai/protos/MetadataFilter.md create mode 100644 docs/api/google/generativeai/protos/Model.md create mode 100644 docs/api/google/generativeai/protos/Part.md create mode 100644 docs/api/google/generativeai/protos/Permission.md create mode 100644 docs/api/google/generativeai/protos/Permission/GranteeType.md create mode 100644 docs/api/google/generativeai/protos/Permission/Role.md create mode 100644 docs/api/google/generativeai/protos/QueryCorpusRequest.md create mode 100644 docs/api/google/generativeai/protos/QueryCorpusResponse.md create mode 100644 
docs/api/google/generativeai/protos/QueryDocumentRequest.md create mode 100644 docs/api/google/generativeai/protos/QueryDocumentResponse.md create mode 100644 docs/api/google/generativeai/protos/RelevantChunk.md create mode 100644 docs/api/google/generativeai/protos/SafetyFeedback.md create mode 100644 docs/api/google/generativeai/protos/SafetyRating.md create mode 100644 docs/api/google/generativeai/protos/SafetySetting.md create mode 100644 docs/api/google/generativeai/protos/Schema.md create mode 100644 docs/api/google/generativeai/protos/Schema/PropertiesEntry.md create mode 100644 docs/api/google/generativeai/protos/SemanticRetrieverConfig.md create mode 100644 docs/api/google/generativeai/protos/StringList.md create mode 100644 docs/api/google/generativeai/protos/TaskType.md create mode 100644 docs/api/google/generativeai/protos/TextCompletion.md create mode 100644 docs/api/google/generativeai/protos/TextPrompt.md create mode 100644 docs/api/google/generativeai/protos/Tool.md create mode 100644 docs/api/google/generativeai/protos/ToolConfig.md create mode 100644 docs/api/google/generativeai/protos/TransferOwnershipRequest.md create mode 100644 docs/api/google/generativeai/protos/TransferOwnershipResponse.md create mode 100644 docs/api/google/generativeai/protos/TunedModel.md create mode 100644 docs/api/google/generativeai/protos/TunedModelSource.md create mode 100644 docs/api/google/generativeai/protos/TuningExample.md create mode 100644 docs/api/google/generativeai/protos/TuningExamples.md create mode 100644 docs/api/google/generativeai/protos/TuningSnapshot.md create mode 100644 docs/api/google/generativeai/protos/TuningTask.md create mode 100644 docs/api/google/generativeai/protos/Type.md create mode 100644 docs/api/google/generativeai/protos/UpdateCachedContentRequest.md create mode 100644 docs/api/google/generativeai/protos/UpdateChunkRequest.md create mode 100644 docs/api/google/generativeai/protos/UpdateCorpusRequest.md create mode 100644 
docs/api/google/generativeai/protos/UpdateDocumentRequest.md create mode 100644 docs/api/google/generativeai/protos/UpdatePermissionRequest.md create mode 100644 docs/api/google/generativeai/protos/UpdateTunedModelRequest.md create mode 100644 docs/api/google/generativeai/protos/VideoMetadata.md create mode 100644 docs/api/google/generativeai/types.md create mode 100644 docs/api/google/generativeai/types/AnyModelNameOptions.md create mode 100644 docs/api/google/generativeai/types/AsyncGenerateContentResponse.md create mode 100644 docs/api/google/generativeai/types/AuthorError.md create mode 100644 docs/api/google/generativeai/types/BaseModelNameOptions.md create mode 100644 docs/api/google/generativeai/types/BlobDict.md create mode 100644 docs/api/google/generativeai/types/BlobType.md create mode 100644 docs/api/google/generativeai/types/BlockedPromptException.md create mode 100644 docs/api/google/generativeai/types/BlockedReason.md create mode 100644 docs/api/google/generativeai/types/BrokenResponseError.md create mode 100644 docs/api/google/generativeai/types/CallableFunctionDeclaration.md create mode 100644 docs/api/google/generativeai/types/ChatResponse.md create mode 100644 docs/api/google/generativeai/types/CitationMetadataDict.md create mode 100644 docs/api/google/generativeai/types/CitationSourceDict.md create mode 100644 docs/api/google/generativeai/types/Completion.md create mode 100644 docs/api/google/generativeai/types/ContentDict.md create mode 100644 docs/api/google/generativeai/types/ContentFilterDict.md create mode 100644 docs/api/google/generativeai/types/ContentType.md create mode 100644 docs/api/google/generativeai/types/ContentsType.md create mode 100644 docs/api/google/generativeai/types/ExampleDict.md create mode 100644 docs/api/google/generativeai/types/ExampleOptions.md create mode 100644 docs/api/google/generativeai/types/ExamplesOptions.md create mode 100644 docs/api/google/generativeai/types/File.md create mode 100644 
docs/api/google/generativeai/types/FileDataDict.md create mode 100644 docs/api/google/generativeai/types/FileDataType.md create mode 100644 docs/api/google/generativeai/types/FunctionDeclaration.md create mode 100644 docs/api/google/generativeai/types/FunctionDeclarationType.md create mode 100644 docs/api/google/generativeai/types/FunctionLibrary.md create mode 100644 docs/api/google/generativeai/types/FunctionLibraryType.md create mode 100644 docs/api/google/generativeai/types/GenerateContentResponse.md create mode 100644 docs/api/google/generativeai/types/GenerationConfig.md create mode 100644 docs/api/google/generativeai/types/GenerationConfigDict.md create mode 100644 docs/api/google/generativeai/types/GenerationConfigType.md create mode 100644 docs/api/google/generativeai/types/HarmBlockThreshold.md create mode 100644 docs/api/google/generativeai/types/HarmCategory.md create mode 100644 docs/api/google/generativeai/types/HarmProbability.md create mode 100644 docs/api/google/generativeai/types/IncompleteIterationError.md create mode 100644 docs/api/google/generativeai/types/MessageDict.md create mode 100644 docs/api/google/generativeai/types/MessageOptions.md create mode 100644 docs/api/google/generativeai/types/MessagePromptDict.md create mode 100644 docs/api/google/generativeai/types/MessagePromptOptions.md create mode 100644 docs/api/google/generativeai/types/MessagesOptions.md create mode 100644 docs/api/google/generativeai/types/Model.md create mode 100644 docs/api/google/generativeai/types/ModelsIterable.md create mode 100644 docs/api/google/generativeai/types/PartDict.md create mode 100644 docs/api/google/generativeai/types/PartType.md create mode 100644 docs/api/google/generativeai/types/Permission.md create mode 100644 docs/api/google/generativeai/types/Permissions.md create mode 100644 docs/api/google/generativeai/types/RequestOptions.md create mode 100644 docs/api/google/generativeai/types/RequestOptionsType.md create mode 100644 
docs/api/google/generativeai/types/ResponseDict.md create mode 100644 docs/api/google/generativeai/types/SafetyFeedbackDict.md create mode 100644 docs/api/google/generativeai/types/SafetyRatingDict.md create mode 100644 docs/api/google/generativeai/types/SafetySettingDict.md create mode 100644 docs/api/google/generativeai/types/Status.md create mode 100644 docs/api/google/generativeai/types/StopCandidateException.md create mode 100644 docs/api/google/generativeai/types/StrictContentType.md create mode 100644 docs/api/google/generativeai/types/Tool.md create mode 100644 docs/api/google/generativeai/types/ToolDict.md create mode 100644 docs/api/google/generativeai/types/ToolsType.md create mode 100644 docs/api/google/generativeai/types/TunedModel.md create mode 100644 docs/api/google/generativeai/types/TunedModelNameOptions.md create mode 100644 docs/api/google/generativeai/types/TunedModelState.md create mode 100644 docs/api/google/generativeai/types/TypedDict.md create mode 100644 docs/api/google/generativeai/types/get_default_file_client.md create mode 100644 docs/api/google/generativeai/types/to_file_data.md create mode 100644 docs/api/google/generativeai/update_tuned_model.md create mode 100644 docs/api/google/generativeai/upload_file.md diff --git a/.gitignore b/.gitignore index 10692be5c..72ac0ed80 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,6 @@ /.idea/ /.pytype/ /build/ -/docs/api *.egg-info .DS_Store __pycache__ diff --git a/docs/api/google/generativeai.md b/docs/api/google/generativeai.md new file mode 100644 index 000000000..23ee47866 --- /dev/null +++ b/docs/api/google/generativeai.md @@ -0,0 +1,138 @@ +description: Google AI Python SDK + +
+ + + + +
+ +# Module: google.generativeai + + + + + + + + + +Google AI Python SDK + + + +## Setup + +```posix-terminal +pip install google-generativeai +``` + +## GenerativeModel + +Use `genai.GenerativeModel` to access the API: + +``` +import google.generativeai as genai +import os + +genai.configure(api_key=os.environ['API_KEY']) + +model = genai.GenerativeModel(model_name='gemini-1.5-flash') +response = model.generate_content('Teach me about how an LLM works') + +print(response.text) +``` + +See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details. + +## Modules + +[`protos`](../google/generativeai/protos.md) module: This module provides low level access to the ProtoBuffer "Message" classes used by the API. + +[`types`](../google/generativeai/types.md) module: A collection of type definitions used throughout the library. + +## Classes + +[`class ChatSession`](../google/generativeai/ChatSession.md): Contains an ongoing conversation with the model. + +[`class GenerationConfig`](../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content. + +[`class GenerativeModel`](../google/generativeai/GenerativeModel.md): The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat. + +## Functions + +[`chat(...)`](../google/generativeai/chat.md): Calls the API to initiate a chat with a model using provided parameters + +[`chat_async(...)`](../google/generativeai/chat_async.md): Calls the API to initiate a chat with a model using provided parameters + +[`configure(...)`](../google/generativeai/configure.md): Captures default client configuration. + +[`count_message_tokens(...)`](../google/generativeai/count_message_tokens.md): Calls the API to calculate the number of tokens used in the prompt. 
+ +[`count_text_tokens(...)`](../google/generativeai/count_text_tokens.md): Calls the API to count the number of tokens in the text prompt. + +[`create_tuned_model(...)`](../google/generativeai/create_tuned_model.md): Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress. + +[`delete_file(...)`](../google/generativeai/delete_file.md): Calls the API to permanently delete a specified file using a supported file service. + +[`delete_tuned_model(...)`](../google/generativeai/delete_tuned_model.md): Calls the API to delete a specified tuned model + +[`embed_content(...)`](../google/generativeai/embed_content.md): Calls the API to create embeddings for content passed in. + +[`embed_content_async(...)`](../google/generativeai/embed_content_async.md): Calls the API to create async embeddings for content passed in. + +[`generate_embeddings(...)`](../google/generativeai/generate_embeddings.md): Calls the API to create an embedding for the text passed in. + +[`generate_text(...)`](../google/generativeai/generate_text.md): Calls the API to generate text based on the provided prompt. + +[`get_base_model(...)`](../google/generativeai/get_base_model.md): Calls the API to fetch a base model by name. + +[`get_file(...)`](../google/generativeai/get_file.md): Calls the API to retrieve a specified file using a supported file service. + +[`get_model(...)`](../google/generativeai/get_model.md): Calls the API to fetch a model by name. + +[`get_operation(...)`](../google/generativeai/get_operation.md): Calls the API to get a specific operation + +[`get_tuned_model(...)`](../google/generativeai/get_tuned_model.md): Calls the API to fetch a tuned model by name. + +[`list_files(...)`](../google/generativeai/list_files.md): Calls the API to list files using a supported file service. + +[`list_models(...)`](../google/generativeai/list_models.md): Calls the API to list all available models. 
+ +[`list_operations(...)`](../google/generativeai/list_operations.md): Calls the API to list all operations + +[`list_tuned_models(...)`](../google/generativeai/list_tuned_models.md): Calls the API to list all tuned models. + +[`update_tuned_model(...)`](../google/generativeai/update_tuned_model.md): Calls the API to push updates to a specified tuned model where only certain attributes are updatable. + +[`upload_file(...)`](../google/generativeai/upload_file.md): Calls the API to upload a file using a supported file service. + + + + + + + + + + + + + + + +
+__version__ + +`'0.7.2'` +
+annotations + +Instance of `__future__._Feature` +
+ diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md new file mode 100644 index 000000000..3898a2ef1 --- /dev/null +++ b/docs/api/google/generativeai/ChatSession.md @@ -0,0 +1,222 @@ +description: Contains an ongoing conversation with the model. + +
+ + + + + + +
+ +# google.generativeai.ChatSession + + + + + + + + + +Contains an ongoing conversation with the model. + + + + + + + +``` +>>> model = genai.GenerativeModel('models/gemini-pro') +>>> chat = model.start_chat() +>>> response = chat.send_message("Hello") +>>> print(response.text) +>>> response = chat.send_message("Hello again") +>>> print(response.text) +>>> response = chat.send_message(... +``` + +This `ChatSession` object collects the messages sent and received, in its +ChatSession.history attribute. + + + + + + + + + + + + + +
+`model` + +The model to use in the chat. +
+`history` + +A chat history to initialize the object with. +
+ + + + + + + + + + + + + + + + + +
+`history` + +The chat history. +
+`last` + +returns the last received `genai.GenerateContentResponse` +
+ + + +## Methods + +

rewind

+ +View source + + + +Removes the last request/response pair from the chat history. + + +

send_message

+ +View source + + + +Sends the conversation history with the added message and returns the model's response. + +Appends the request and response to the conversation history. + +``` +>>> model = genai.GenerativeModel('models/gemini-pro') +>>> chat = model.start_chat() +>>> response = chat.send_message("Hello") +>>> print(response.text) +"Hello! How can I assist you today?" +>>> len(chat.history) +2 +``` + +Call it with `stream=True` to receive response chunks as they are generated: + +``` +>>> chat = model.start_chat() +>>> response = chat.send_message("Explain quantum physics", stream=True) +>>> for chunk in response: +... print(chunk.text, end='') +``` + +Once iteration over chunks is complete, the `response` and `ChatSession` are in states identical to the +`stream=False` case. Some properties are not available until iteration is complete. + +Like GenerativeModel.generate_content this method lets you override the model's `generation_config` and +`safety_settings`. + + + + + + + + + + + + + + + + + + + +
Arguments
+`content` + +The message contents. +
+`generation_config` + +Overrides for the model's generation config. +
+`safety_settings` + +Overrides for the model's safety settings. +
+`stream` + +If True, yield response chunks as they are generated. +
+ + + +

send_message_async

+ +View source + + + +The async version of ChatSession.send_message. + + + + diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md new file mode 100644 index 000000000..9b9e7ff6f --- /dev/null +++ b/docs/api/google/generativeai/GenerativeModel.md @@ -0,0 +1,429 @@ +description: The genai.GenerativeModel class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat. + +
+ + + + + + + + + +
+ +# google.generativeai.GenerativeModel + + + + + + + + + +The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat. + + + + + + + +This family of functionality is designed to support multi-turn conversations, and multimodal +requests. What media-types are supported for input and output is model-dependant. + +``` +>>> import google.generativeai as genai +>>> import PIL.Image +>>> genai.configure(api_key='YOUR_API_KEY') +>>> model = genai.GenerativeModel('models/gemini-pro') +>>> result = model.generate_content('Tell me a story about a magic backpack') +>>> result.text +"In the quaint little town of Lakeside, there lived a young girl named Lily..." +``` + +#### Multimodal input: + + + +``` +>>> model = genai.GenerativeModel('models/gemini-pro') +>>> result = model.generate_content([ +... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')]) +>>> result.text +"**Blueberry Scones** ..." +``` + +Multi-turn conversation: + +``` +>>> chat = model.start_chat() +>>> response = chat.send_message("Hi, I have some questions for you.") +>>> response.text +"Sure, I'll do my best to answer your questions..." +``` + +To list the compatible model names use: + +``` +>>> for m in genai.list_models(): +... if 'generateContent' in m.supported_generation_methods: +... print(m.name) +``` + + + + + + + + + + + + + + + + +
+`model_name` + +The name of the model to query. To list compatible models use +
+`safety_settings` + +Sets the default safety filters. This controls which content is blocked +by the api before being returned. +
+`generation_config` + +A `genai.GenerationConfig` setting the default generation parameters to +use. +
+ + + + + + + + + + + + + + + + + +
+`cached_content` + + +
+`model_name` + + +
+ + + +## Methods + +

count_tokens

+ +View source + + + + + + +

count_tokens_async

+ +View source + + + + + + +

from_cached_content

+ +View source + + + +Creates a model with `cached_content` as model's context. + + + + + + + + + + + + + + + + + +
Args
+`cached_content` + +context for the model. +
+`generation_config` + +Overrides for the model's generation config. +
+`safety_settings` + +Overrides for the model's safety settings. +
+ + + + + + + + + + + +
Returns
+`GenerativeModel` object with `cached_content` as its context. +
+ + + +

generate_content

+ +View source + + + +A multipurpose function to generate responses from the model. + +This GenerativeModel.generate_content method can handle multimodal input, and multi-turn +conversations. + +``` +>>> model = genai.GenerativeModel('models/gemini-pro') +>>> response = model.generate_content('Tell me a story about a magic backpack') +>>> response.text +``` + +### Streaming + +This method supports streaming with the `stream=True`. The result has the same type as the non streaming case, +but you can iterate over the response chunks as they become available: + +``` +>>> response = model.generate_content('Tell me a story about a magic backpack', stream=True) +>>> for chunk in response: +... print(chunk.text) +``` + +### Multi-turn + +This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each +request. This takes some manual management but gives you complete control: + +``` +>>> messages = [{'role':'user', 'parts': ['hello']}] +>>> response = model.generate_content(messages) # "Hello, how can I help" +>>> messages.append(response.candidates[0].content) +>>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']}) +>>> response = model.generate_content(messages) +``` + +For a simpler multi-turn interface see GenerativeModel.start_chat. + +### Input type flexibility + +While the underlying API strictly expects a `list[protos.Content]` objects, this method +will convert the user input into the correct type. The hierarchy of types that can be +converted is below. Any of these objects can be passed as an equivalent `dict`. + +* `Iterable[protos.Content]` +* protos.Content +* `Iterable[protos.Part]` +* protos.Part +* `str`, `Image`, or protos.Blob + +In an `Iterable[protos.Content]` each `content` is a separate message. +But note that an `Iterable[protos.Part]` is taken as the parts of a single message. + + + + + + + + + + + + + + + + + + + + + + + + + +
Arguments
+`contents` + +The contents serving as the model's prompt. +
+`generation_config` + +Overrides for the model's generation config. +
+`safety_settings` + +Overrides for the model's safety settings. +
+`stream` + +If True, yield response chunks as they are generated. +
+`tools` + +`protos.Tools` more info coming soon. +
+`request_options` + +Options for the request. +
+ + + +

generate_content_async

+ +View source + + + +The async version of GenerativeModel.generate_content. + + +

start_chat

+ +View source + + + +Returns a `genai.ChatSession` attached to this model. + +``` +>>> model = genai.GenerativeModel() +>>> chat = model.start_chat(history=[...]) +>>> response = chat.send_message("Hello?") +``` + + + + + + + + + + +
Arguments
+`history` + +An iterable of protos.Content objects, or equivalents to initialize the session. +
+ + + + + diff --git a/docs/api/google/generativeai/_api_cache.json b/docs/api/google/generativeai/_api_cache.json new file mode 100644 index 000000000..a1d446e53 --- /dev/null +++ b/docs/api/google/generativeai/_api_cache.json @@ -0,0 +1,9886 @@ +{ + "duplicate_of": { + "google.generativeai.ChatSession.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.ChatSession.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.ChatSession.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.ChatSession.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.ChatSession.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.ChatSession.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.ChatSession.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.GenerationConfig": "google.generativeai.types.GenerationConfig", + "google.generativeai.GenerationConfig.__eq__": "google.generativeai.types.GenerationConfig.__eq__", + "google.generativeai.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.GenerationConfig.__init__": "google.generativeai.types.GenerationConfig.__init__", + "google.generativeai.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", 
+ "google.generativeai.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.GenerativeModel.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.GenerativeModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.GenerativeModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.GenerativeModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.GenerativeModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.GenerativeModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.GenerativeModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.annotations": "google.generativeai.types.annotations", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.AttributionSourceId.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.AttributionSourceId.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.AttributionSourceId.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.AttributionSourceId.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchCreateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchCreateChunksRequest.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchCreateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchCreateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchCreateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchCreateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchDeleteChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchEmbedContentsRequest.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchEmbedContentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchEmbedTextRequest.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchUpdateChunksRequest.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchUpdateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Blob.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Blob.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Blob.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Blob.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Blob.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Blob.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Blob.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Blob.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CachedContent.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.CachedContent.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CachedContent.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CachedContent.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CachedContent.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CachedContent.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Candidate.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Candidate.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Candidate.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Candidate.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Candidate.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Chunk.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.Chunk.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.Chunk.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.Chunk.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.Chunk.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.Chunk.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.Chunk.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.Chunk.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.Chunk.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.Chunk.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.Chunk.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.Chunk.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.Chunk.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.Chunk.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.Chunk.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.Chunk.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.Chunk.State.__neg__": 
"google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.Chunk.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.Chunk.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.Chunk.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.Chunk.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.Chunk.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.Chunk.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.Chunk.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.Chunk.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.Chunk.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.Chunk.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.Chunk.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.Chunk.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.Chunk.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.Chunk.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.Chunk.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.Chunk.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.Chunk.State.__rxor__": 
"google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.Chunk.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.Chunk.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.Chunk.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.Chunk.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.Chunk.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.Chunk.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.Chunk.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.Chunk.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.Chunk.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Chunk.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.Chunk.State.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.Chunk.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.Chunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Chunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Chunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Chunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Chunk.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Chunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Chunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Chunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ChunkData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ChunkData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ChunkData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ChunkData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ChunkData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CitationMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CitationMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + 
"google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CitationMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CitationSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CitationSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CitationSource.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CitationSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CitationSource.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CodeExecution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.CodeExecution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CodeExecution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + 
"google.generativeai.protos.CodeExecutionResult.Outcome.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + 
"google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": 
"google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.CodeExecutionResult.Outcome.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.CodeExecutionResult.Outcome.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.CodeExecutionResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CodeExecutionResult.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CodeExecutionResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CodeExecutionResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Condition.Operator.__abs__": 
"google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.Condition.Operator.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.Condition.Operator.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.Condition.Operator.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.Condition.Operator.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.Condition.Operator.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.Condition.Operator.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.Condition.Operator.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.Condition.Operator.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.Condition.Operator.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.Condition.Operator.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.Condition.Operator.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.Condition.Operator.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.Condition.Operator.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.Condition.Operator.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.Condition.Operator.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.Condition.Operator.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + 
"google.generativeai.protos.Condition.Operator.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.Condition.Operator.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.Condition.Operator.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.Condition.Operator.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.Condition.Operator.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.Condition.Operator.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.Condition.Operator.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.Condition.Operator.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.Condition.Operator.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.Condition.Operator.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.Condition.Operator.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.Condition.Operator.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.Condition.Operator.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.Condition.Operator.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.Condition.Operator.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.Condition.Operator.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + 
"google.generativeai.protos.Condition.Operator.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.Condition.Operator.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.Condition.Operator.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.Condition.Operator.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.Condition.Operator.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.Condition.Operator.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.Condition.Operator.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.Condition.Operator.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.Condition.Operator.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.Condition.Operator.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Condition.Operator.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.Condition.Operator.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.Condition.Operator.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.Condition.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Condition.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Condition.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.Condition.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Condition.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Condition.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Condition.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Condition.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Content.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Content.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Content.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Content.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Content.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Content.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Content.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Content.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ContentEmbedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ContentEmbedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ContentEmbedding.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ContentEmbedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ContentEmbedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ContentFilter.BlockedReason": "google.generativeai.types.BlockedReason", + "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.ContentFilter.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.ContentFilter.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.ContentFilter.BlockedReason.__init__": 
"google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.ContentFilter.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.ContentFilter.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.ContentFilter.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": 
"google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + 
"google.generativeai.protos.ContentFilter.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.ContentFilter.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.ContentFilter.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.ContentFilter.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.ContentFilter.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.ContentFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ContentFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ContentFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.ContentFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Corpus.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Corpus.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Corpus.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Corpus.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Corpus.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Corpus.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Corpus.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Corpus.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountMessageTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountMessageTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.CountMessageTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountMessageTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountMessageTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountMessageTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTextTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTextTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + 
"google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTextTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTextTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTextTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountTextTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTextTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountTokensRequest.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + 
"google.generativeai.protos.CreateCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateChunkRequest.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.CreateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateFileResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateFileResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateFileResponse.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateFileResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreatePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateTunedModelMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateTunedModelMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateTunedModelMetadata.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateTunedModelMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateTunedModelMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CustomMetadata.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CustomMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CustomMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Dataset.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Dataset.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Dataset.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Dataset.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Dataset.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Dataset.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Dataset.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Dataset.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.DeleteCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.DeleteChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteDocumentRequest.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeletePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.DeletePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeletePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Document.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Document.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Document.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Document.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Document.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Document.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Document.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Document.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedContentResponse.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.EmbedContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.EmbedTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Embedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Embedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Embedding.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Embedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Embedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Embedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.Embedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Embedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Example.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Example.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Example.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Example.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Example.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Example.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Example.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Example.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ExecutableCode.Language.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.ExecutableCode.Language.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.ExecutableCode.Language.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.ExecutableCode.Language.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.ExecutableCode.Language.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.ExecutableCode.Language.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", 
+ "google.generativeai.protos.ExecutableCode.Language.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.ExecutableCode.Language.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.ExecutableCode.Language.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.ExecutableCode.Language.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.ExecutableCode.Language.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.ExecutableCode.Language.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.ExecutableCode.Language.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.ExecutableCode.Language.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.ExecutableCode.Language.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.ExecutableCode.Language.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.ExecutableCode.Language.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.ExecutableCode.Language.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.ExecutableCode.Language.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.ExecutableCode.Language.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.ExecutableCode.Language.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.ExecutableCode.Language.__radd__": 
"google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.ExecutableCode.Language.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.ExecutableCode.Language.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.ExecutableCode.Language.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.ExecutableCode.Language.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.ExecutableCode.Language.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.ExecutableCode.Language.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.ExecutableCode.Language.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.ExecutableCode.Language.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.ExecutableCode.Language.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.ExecutableCode.Language.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.ExecutableCode.Language.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.ExecutableCode.Language.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.ExecutableCode.Language.__xor__": 
"google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.ExecutableCode.Language.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.ExecutableCode.Language.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.ExecutableCode.Language.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.ExecutableCode.Language.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.ExecutableCode.Language.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.ExecutableCode.Language.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.ExecutableCode.Language.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.ExecutableCode.Language.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.ExecutableCode.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ExecutableCode.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ExecutableCode.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ExecutableCode.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.File.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.File.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.File.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.File.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.File.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.File.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.File.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.File.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.File.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.File.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.File.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.File.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.File.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.File.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.File.State.__mul__": 
"google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.File.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.File.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.File.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.File.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.File.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.File.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.File.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.File.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.File.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.File.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.File.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.File.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.File.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.File.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.File.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.File.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.File.State.__rsub__": 
"google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.File.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.File.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.File.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.File.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.File.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.File.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.File.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.File.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.File.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.File.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.File.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.File.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.File.State.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.File.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.File.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.File.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.File.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.File.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FileData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.FileData.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.FileData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FileData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.FileData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.FileData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FileData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.FileData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionCall.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.FunctionCall.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionCall.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionCall.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": 
"google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": 
"google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": 
"google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.FunctionCallingConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.FunctionCallingConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.FunctionCallingConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionCallingConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionCallingConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionDeclaration.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionDeclaration.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionDeclaration.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": 
"google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + 
"google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + 
"google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.GenerateAnswerRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": 
"google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateContentRequest.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": 
"google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": 
"google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateMessageRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateMessageRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + 
"google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateMessageRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateMessageResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateMessageResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateMessageResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateTextRequest.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerationConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + 
"google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerationConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerationConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetChunkRequest.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetCorpusRequest.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetFileRequest.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetPermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetPermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + 
"google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetPermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingAttribution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingAttribution.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GroundingAttribution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GroundingAttribution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingPassage.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingPassage.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GroundingPassage.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingPassages.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + 
"google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingPassages.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GroundingPassages.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + 
"google.generativeai.protos.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + 
"google.generativeai.protos.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.HarmCategory.numerator": 
"google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.Hyperparameters.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Hyperparameters.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Hyperparameters.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCachedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCachedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListCachedContentsRequest.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCachedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCachedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCachedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCachedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListChunksRequest.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCorporaRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + 
"google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCorporaRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListCorporaRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCorporaRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCorporaResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCorporaResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCorporaResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListCorporaResponse.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListDocumentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListDocumentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListDocumentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListDocumentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListDocumentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListDocumentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.ListDocumentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListFilesRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListFilesRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListFilesRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListFilesResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListFilesResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListFilesResponse.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListFilesResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.ListModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListPermissionsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListPermissionsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListPermissionsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListPermissionsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListPermissionsResponse.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListPermissionsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListPermissionsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListTunedModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListTunedModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListTunedModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListTunedModelsRequest.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListTunedModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListTunedModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListTunedModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Message.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Message.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Message.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Message.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Message.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Message.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Message.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Message.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.MessagePrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.MessagePrompt.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.MessagePrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.MessagePrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.MetadataFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.MetadataFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.MetadataFilter.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.MetadataFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Model.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Model.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Model.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Part.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Part.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Part.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Part.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Part.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Part.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Part.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Part.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Permission.GranteeType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.Permission.GranteeType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.Permission.GranteeType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.Permission.GranteeType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.Permission.GranteeType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.Permission.GranteeType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.Permission.GranteeType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.Permission.GranteeType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.Permission.GranteeType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.Permission.GranteeType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.Permission.GranteeType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.Permission.GranteeType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.Permission.GranteeType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + 
"google.generativeai.protos.Permission.GranteeType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.Permission.GranteeType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.Permission.GranteeType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.Permission.GranteeType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.Permission.GranteeType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.Permission.GranteeType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.Permission.GranteeType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.Permission.GranteeType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.Permission.GranteeType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.Permission.GranteeType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.Permission.GranteeType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.Permission.GranteeType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.Permission.GranteeType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.Permission.GranteeType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.Permission.GranteeType.__rpow__": 
"google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.Permission.GranteeType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.Permission.GranteeType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.Permission.GranteeType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.Permission.GranteeType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.Permission.GranteeType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.Permission.GranteeType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.Permission.GranteeType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.Permission.GranteeType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.Permission.GranteeType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.Permission.GranteeType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.Permission.GranteeType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.Permission.GranteeType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.Permission.GranteeType.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Permission.GranteeType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + 
"google.generativeai.protos.Permission.GranteeType.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.Permission.GranteeType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.Permission.Role.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.Permission.Role.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.Permission.Role.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.Permission.Role.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.Permission.Role.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.Permission.Role.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.Permission.Role.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.Permission.Role.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.Permission.Role.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.Permission.Role.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.Permission.Role.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.Permission.Role.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.Permission.Role.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.Permission.Role.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.Permission.Role.__mul__": 
"google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.Permission.Role.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.Permission.Role.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.Permission.Role.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.Permission.Role.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.Permission.Role.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.Permission.Role.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.Permission.Role.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.Permission.Role.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.Permission.Role.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.Permission.Role.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.Permission.Role.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.Permission.Role.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.Permission.Role.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.Permission.Role.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.Permission.Role.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.Permission.Role.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + 
"google.generativeai.protos.Permission.Role.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.Permission.Role.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.Permission.Role.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.Permission.Role.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.Permission.Role.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.Permission.Role.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.Permission.Role.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.Permission.Role.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.Permission.Role.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.Permission.Role.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.Permission.Role.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.Permission.Role.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Permission.Role.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.Permission.Role.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.Permission.Role.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.Permission.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Permission.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Permission.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Permission.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.QueryCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryCorpusResponse.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryCorpusResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryCorpusResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.QueryDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryDocumentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryDocumentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryDocumentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.RelevantChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.RelevantChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.RelevantChunk.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.RelevantChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetyFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetyFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetyFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetyRating.HarmProbability": "google.generativeai.types.HarmProbability", + "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.SafetyRating.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.SafetyRating.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": 
"google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.SafetyRating.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.SafetyRating.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.SafetyRating.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.SafetyRating.HarmProbability.__or__": 
"google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + 
"google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.SafetyRating.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.SafetyRating.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.SafetyRating.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.SafetyRating.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.SafetyRating.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.SafetyRating.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + 
"google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetyRating.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetyRating.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold": "google.generativeai.types.HarmBlockThreshold", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + 
"google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": 
"google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", 
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.SafetySetting.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetySetting.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.SafetySetting.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetySetting.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.SafetySetting.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Schema.PropertiesEntry.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Schema.PropertiesEntry.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Schema.PropertiesEntry.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Schema.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Schema.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Schema.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Schema.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Schema.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Schema.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Schema.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Schema.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SemanticRetrieverConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SemanticRetrieverConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SemanticRetrieverConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.StringList.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.StringList.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.StringList.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.StringList.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.StringList.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.StringList.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.StringList.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.StringList.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TaskType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.TaskType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.TaskType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.TaskType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.TaskType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.TaskType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.TaskType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.TaskType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.TaskType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.TaskType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.TaskType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.TaskType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.TaskType.__lt__": 
"google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.TaskType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.TaskType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.TaskType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.TaskType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.TaskType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.TaskType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.TaskType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.TaskType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.TaskType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.TaskType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.TaskType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.TaskType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.TaskType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.TaskType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.TaskType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.TaskType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.TaskType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + 
"google.generativeai.protos.TaskType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.TaskType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.TaskType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.TaskType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.TaskType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.TaskType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.TaskType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.TaskType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.TaskType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.TaskType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.TaskType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.TaskType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.TaskType.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.TaskType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.TaskType.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.TaskType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.TextCompletion.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + 
"google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TextCompletion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TextCompletion.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TextCompletion.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TextPrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TextPrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TextPrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TextPrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Tool.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Tool.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Tool.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ToolConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ToolConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ToolConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TransferOwnershipRequest.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TransferOwnershipRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TransferOwnershipRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TransferOwnershipResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TransferOwnershipResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TransferOwnershipResponse.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TunedModel.State": "google.generativeai.types.TunedModelState", + "google.generativeai.protos.TunedModel.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.TunedModel.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.TunedModel.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.TunedModel.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.TunedModel.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.TunedModel.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.TunedModel.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.TunedModel.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.TunedModel.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.TunedModel.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.TunedModel.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.TunedModel.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.TunedModel.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.TunedModel.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + 
"google.generativeai.protos.TunedModel.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.TunedModel.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.TunedModel.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.TunedModel.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.TunedModel.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.TunedModel.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.TunedModel.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.TunedModel.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.TunedModel.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.TunedModel.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.TunedModel.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.TunedModel.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.TunedModel.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.TunedModel.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.TunedModel.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.TunedModel.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.TunedModel.State.__rshift__": 
"google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.TunedModel.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.TunedModel.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.TunedModel.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.TunedModel.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.TunedModel.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.TunedModel.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.TunedModel.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.TunedModel.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.TunedModel.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.TunedModel.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.TunedModel.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.TunedModel.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.TunedModel.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.TunedModel.State.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.TunedModel.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.TunedModel.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TunedModel.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TunedModel.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TunedModelSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TunedModelSource.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TunedModelSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TunedModelSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TunedModelSource.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningExample.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TuningExample.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningExample.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TuningExample.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningExample.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TuningExample.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningExamples.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningExamples.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TuningExamples.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningExamples.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningSnapshot.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningSnapshot.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningSnapshot.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningTask.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningTask.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.TuningTask.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.TuningTask.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningTask.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.TuningTask.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Type.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.Type.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.Type.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.Type.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.protos.Type.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.Type.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.Type.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.Type.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.Type.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.Type.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.Type.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.Type.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.Type.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.Type.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.Type.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.Type.__ne__": 
"google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.Type.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.Type.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.Type.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.protos.Type.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.Type.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.Type.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.Type.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.Type.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.Type.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.Type.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.Type.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.Type.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.Type.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.Type.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.Type.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.Type.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.Type.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.Type.__rxor__": 
"google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.protos.Type.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.Type.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.Type.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.Type.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.Type.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.Type.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.Type.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.Type.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.Type.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Type.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.Type.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.Type.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.UpdateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + 
"google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdateCorpusRequest.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + 
"google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + 
"google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.VideoMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.VideoMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.VideoMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.VideoMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.AsyncGenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.AuthorError.__eq__": 
"google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.AuthorError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.AuthorError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.AuthorError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.AuthorError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.AuthorError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.BlockedPromptException.__init__": "google.generativeai.types.AuthorError.__init__", + "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.BlockedPromptException.__new__": "google.generativeai.types.AuthorError.__new__", + "google.generativeai.types.BlockedPromptException.add_note": "google.generativeai.types.AuthorError.add_note", + "google.generativeai.types.BlockedPromptException.args": "google.generativeai.types.AuthorError.args", + "google.generativeai.types.BlockedPromptException.with_traceback": 
"google.generativeai.types.AuthorError.with_traceback", + "google.generativeai.types.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.types.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.types.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.types.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.types.BlockedReason.__contains__": "google.generativeai.protos.ContentFilter.BlockedReason.__contains__", + "google.generativeai.types.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.types.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.types.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.types.BlockedReason.__getitem__": "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__", + "google.generativeai.types.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.types.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.types.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.types.BlockedReason.__iter__": "google.generativeai.protos.ContentFilter.BlockedReason.__iter__", + "google.generativeai.types.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.types.BlockedReason.__len__": "google.generativeai.protos.ContentFilter.BlockedReason.__len__", + "google.generativeai.types.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.types.BlockedReason.__lt__": 
"google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.types.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.types.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.types.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.types.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.types.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.types.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.types.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.types.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.types.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.types.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.types.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.types.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.types.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.types.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.types.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.types.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.types.BlockedReason.__rrshift__": 
"google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.types.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.types.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.types.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.types.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.types.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.types.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.types.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.types.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.types.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.types.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.types.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.types.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.types.BlockedReason.from_bytes": "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes", + "google.generativeai.types.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.types.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real", + 
"google.generativeai.types.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.AuthorError.__init__", + "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.AuthorError.__new__", + "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.AuthorError.add_note", + "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.AuthorError.args", + "google.generativeai.types.BrokenResponseError.with_traceback": "google.generativeai.types.AuthorError.with_traceback", + "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.CallableFunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.CallableFunctionDeclaration.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.CallableFunctionDeclaration.description": "google.generativeai.types.FunctionDeclaration.description", + "google.generativeai.types.CallableFunctionDeclaration.from_function": "google.generativeai.types.FunctionDeclaration.from_function", + "google.generativeai.types.CallableFunctionDeclaration.name": "google.generativeai.types.FunctionDeclaration.name", + "google.generativeai.types.CallableFunctionDeclaration.parameters": "google.generativeai.types.FunctionDeclaration.parameters", + "google.generativeai.types.CallableFunctionDeclaration.to_proto": "google.generativeai.types.FunctionDeclaration.to_proto", + "google.generativeai.types.ChatResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.ChatResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.ChatResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.ChatResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.ChatResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.ChatResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.CitationMetadataDict.__contains__": 
"google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.CitationMetadataDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.CitationMetadataDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.CitationMetadataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.CitationMetadataDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.CitationMetadataDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.CitationMetadataDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.CitationMetadataDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.CitationMetadataDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.CitationMetadataDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.CitationMetadataDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.CitationMetadataDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.CitationMetadataDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.CitationMetadataDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.CitationMetadataDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.CitationMetadataDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.CitationMetadataDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.CitationMetadataDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.CitationMetadataDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.CitationMetadataDict.pop": 
"google.generativeai.types.BlobDict.pop", + "google.generativeai.types.CitationMetadataDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.CitationMetadataDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.CitationMetadataDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.CitationMetadataDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.CitationSourceDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.CitationSourceDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.CitationSourceDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.CitationSourceDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.CitationSourceDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.CitationSourceDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.CitationSourceDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.CitationSourceDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.CitationSourceDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.CitationSourceDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.CitationSourceDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.CitationSourceDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.CitationSourceDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.CitationSourceDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.CitationSourceDict.clear": 
"google.generativeai.types.BlobDict.clear", + "google.generativeai.types.CitationSourceDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.CitationSourceDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.CitationSourceDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.CitationSourceDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.CitationSourceDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.CitationSourceDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.CitationSourceDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.CitationSourceDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.CitationSourceDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.Completion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.Completion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.Completion.__init__": "google.generativeai.types.ChatResponse.__init__", + "google.generativeai.types.Completion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.Completion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.Completion.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.Completion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.ContentDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.ContentDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + 
"google.generativeai.types.ContentDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.ContentDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.ContentDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.ContentDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.ContentDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.ContentDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.ContentDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.ContentDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.ContentDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.ContentDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.ContentDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.ContentDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.ContentDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.ContentDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.ContentDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.ContentDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.ContentDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.ContentDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.ContentDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.ContentDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.ContentDict.update": 
"google.generativeai.types.BlobDict.update", + "google.generativeai.types.ContentDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.ContentFilterDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.ContentFilterDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.ContentFilterDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.ContentFilterDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.ContentFilterDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.ContentFilterDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.ContentFilterDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.ContentFilterDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.ContentFilterDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.ContentFilterDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.ContentFilterDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.ContentFilterDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.ContentFilterDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.ContentFilterDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.ContentFilterDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.ContentFilterDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.ContentFilterDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.ContentFilterDict.items": "google.generativeai.types.BlobDict.items", + 
"google.generativeai.types.ContentFilterDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.ContentFilterDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.ContentFilterDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.ContentFilterDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.ContentFilterDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.ContentFilterDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.ExampleDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.ExampleDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.ExampleDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.ExampleDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.ExampleDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.ExampleDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.ExampleDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.ExampleDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.ExampleDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.ExampleDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.ExampleDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.ExampleDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.ExampleDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.ExampleDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + 
"google.generativeai.types.ExampleDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.ExampleDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.ExampleDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.ExampleDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.ExampleDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.ExampleDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.ExampleDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.ExampleDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.ExampleDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.ExampleDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.File.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.File.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.File.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.FileDataDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.FileDataDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + 
"google.generativeai.types.FileDataDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.FileDataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.FileDataDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.FileDataDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.FileDataDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.FileDataDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.FileDataDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.FileDataDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.FileDataDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.FileDataDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.FileDataDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.FileDataDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.FileDataDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.FileDataDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.FileDataDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.FileDataDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.FileDataDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.FileDataDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.FileDataDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.FileDataDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.FileDataDict.update": 
"google.generativeai.types.BlobDict.update", + "google.generativeai.types.FileDataDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.FunctionLibrary.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.GenerateContentResponse.__init__": "google.generativeai.types.AsyncGenerateContentResponse.__init__", + "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.GenerateContentResponse.candidates": "google.generativeai.types.AsyncGenerateContentResponse.candidates", + "google.generativeai.types.GenerateContentResponse.parts": "google.generativeai.types.AsyncGenerateContentResponse.parts", + "google.generativeai.types.GenerateContentResponse.prompt_feedback": "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback", + "google.generativeai.types.GenerateContentResponse.text": "google.generativeai.types.AsyncGenerateContentResponse.text", + "google.generativeai.types.GenerateContentResponse.to_dict": "google.generativeai.types.AsyncGenerateContentResponse.to_dict", + "google.generativeai.types.GenerateContentResponse.usage_metadata": "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata", + "google.generativeai.types.GenerationConfig.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.GenerationConfigDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.GenerationConfigDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.GenerationConfigDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.GenerationConfigDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.GenerationConfigDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.GenerationConfigDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.GenerationConfigDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.GenerationConfigDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.GenerationConfigDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.GenerationConfigDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.GenerationConfigDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.GenerationConfigDict.__new__": 
"google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.GenerationConfigDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.GenerationConfigDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.GenerationConfigDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.GenerationConfigDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.GenerationConfigDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.GenerationConfigDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.GenerationConfigDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.GenerationConfigDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.GenerationConfigDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.GenerationConfigDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.GenerationConfigDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.GenerationConfigDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.types.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.types.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.types.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.types.HarmBlockThreshold.__contains__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__", + "google.generativeai.types.HarmBlockThreshold.__eq__": 
"google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.types.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.types.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.types.HarmBlockThreshold.__getitem__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__", + "google.generativeai.types.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.types.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.types.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.types.HarmBlockThreshold.__iter__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__", + "google.generativeai.types.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.types.HarmBlockThreshold.__len__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__", + "google.generativeai.types.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.types.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.types.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.types.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.types.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.types.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.types.HarmBlockThreshold.__new__": 
"google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.types.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.types.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.types.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.types.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.types.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.types.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.types.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.types.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.types.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.types.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.types.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.types.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.types.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.types.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.types.HarmBlockThreshold.__rxor__": 
"google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.types.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.types.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.types.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.types.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.types.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.types.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.types.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.types.HarmBlockThreshold.from_bytes": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes", + "google.generativeai.types.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.types.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.types.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.types.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.types.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.types.HarmCategory.__and__": 
"google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.types.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.types.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.types.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.types.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.types.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.types.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.types.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.types.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.types.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.types.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.types.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.types.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.types.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.types.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.types.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.types.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.types.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + 
"google.generativeai.types.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.types.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.types.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.types.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.types.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.types.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.types.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.types.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.types.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.types.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.types.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.types.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.types.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.types.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.types.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.types.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.types.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + 
"google.generativeai.types.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.types.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.types.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.types.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.types.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.types.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.types.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.types.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.types.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.types.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.types.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.types.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.types.HarmProbability.__contains__": "google.generativeai.protos.SafetyRating.HarmProbability.__contains__", + "google.generativeai.types.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.types.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.types.HarmProbability.__ge__": 
"google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.types.HarmProbability.__getitem__": "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__", + "google.generativeai.types.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.types.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.types.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.types.HarmProbability.__iter__": "google.generativeai.protos.SafetyRating.HarmProbability.__iter__", + "google.generativeai.types.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.types.HarmProbability.__len__": "google.generativeai.protos.SafetyRating.HarmProbability.__len__", + "google.generativeai.types.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.types.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.types.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.types.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.types.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.types.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.types.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.types.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.types.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + 
"google.generativeai.types.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.types.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.types.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.types.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.types.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.types.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.types.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.types.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.types.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.types.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.types.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.types.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.types.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.types.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.types.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.types.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.types.HarmProbability.__xor__": 
"google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.types.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.types.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.types.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.types.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.types.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.types.HarmProbability.from_bytes": "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes", + "google.generativeai.types.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.types.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.types.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.AuthorError.__init__", + "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.IncompleteIterationError.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.AuthorError.__new__", + "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.AuthorError.add_note", + "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.AuthorError.args", + "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.AuthorError.with_traceback", + "google.generativeai.types.MessageDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.MessageDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.MessageDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.MessageDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.MessageDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.MessageDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.MessageDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.MessageDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.MessageDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.MessageDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.MessageDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.MessageDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.MessageDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.MessageDict.__ror__": 
"google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.MessageDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.MessageDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.MessageDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.MessageDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.MessageDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.MessageDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.MessageDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.MessageDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.MessageDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.MessageDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.MessagePromptDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.MessagePromptDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.MessagePromptDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.MessagePromptDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.MessagePromptDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.MessagePromptDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.MessagePromptDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.MessagePromptDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.MessagePromptDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.MessagePromptDict.__lt__": 
"google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.MessagePromptDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.MessagePromptDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.MessagePromptDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.MessagePromptDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.MessagePromptDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.MessagePromptDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.MessagePromptDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.MessagePromptDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.MessagePromptDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.MessagePromptDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.MessagePromptDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.MessagePromptDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.MessagePromptDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.MessagePromptDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.Model.__ne__": 
"google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.ModelNameOptions": "google.generativeai.types.AnyModelNameOptions", + "google.generativeai.types.PartDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.PartDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.PartDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.PartDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.PartDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.PartDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.PartDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.PartDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.PartDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.PartDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.PartDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.PartDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.PartDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.PartDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.PartDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.PartDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.PartDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.PartDict.items": "google.generativeai.types.BlobDict.items", + 
"google.generativeai.types.PartDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.PartDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.PartDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.PartDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.PartDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.PartDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.Permission.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.Permission.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.Permissions.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.Permissions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.Permissions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.Permissions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.Permissions.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.Permissions.__ne__": 
"google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.Permissions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.RequestOptions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.RequestOptions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.ResponseDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.ResponseDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.ResponseDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.ResponseDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.ResponseDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.ResponseDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.ResponseDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.ResponseDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.ResponseDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.ResponseDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + 
"google.generativeai.types.ResponseDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.ResponseDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.ResponseDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.ResponseDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.ResponseDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.ResponseDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.ResponseDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.ResponseDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.ResponseDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.ResponseDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.ResponseDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.ResponseDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.ResponseDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.ResponseDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.SafetyFeedbackDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.SafetyFeedbackDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.SafetyFeedbackDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.SafetyFeedbackDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.SafetyFeedbackDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.SafetyFeedbackDict.__init__": "google.generativeai.types.BlobDict.__init__", + 
"google.generativeai.types.SafetyFeedbackDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.SafetyFeedbackDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.SafetyFeedbackDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.SafetyFeedbackDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.SafetyFeedbackDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.SafetyFeedbackDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.SafetyFeedbackDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.SafetyFeedbackDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.SafetyFeedbackDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.SafetyFeedbackDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.SafetyFeedbackDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.SafetyFeedbackDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.SafetyFeedbackDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.SafetyFeedbackDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.SafetyFeedbackDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.SafetyFeedbackDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.SafetyFeedbackDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.SafetyFeedbackDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.SafetyRatingDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.SafetyRatingDict.__eq__": 
"google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.SafetyRatingDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.SafetyRatingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.SafetyRatingDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.SafetyRatingDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.SafetyRatingDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.SafetyRatingDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.SafetyRatingDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.SafetyRatingDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.SafetyRatingDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.SafetyRatingDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.SafetyRatingDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.SafetyRatingDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.SafetyRatingDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.SafetyRatingDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.SafetyRatingDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.SafetyRatingDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.SafetyRatingDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.SafetyRatingDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.SafetyRatingDict.popitem": "google.generativeai.types.BlobDict.popitem", + 
"google.generativeai.types.SafetyRatingDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.SafetyRatingDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.SafetyRatingDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.SafetySettingDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.SafetySettingDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.SafetySettingDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.SafetySettingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.SafetySettingDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.SafetySettingDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.SafetySettingDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.SafetySettingDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.SafetySettingDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.SafetySettingDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.SafetySettingDict.__ne__": "google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.SafetySettingDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.SafetySettingDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.SafetySettingDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.SafetySettingDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.SafetySettingDict.copy": "google.generativeai.types.BlobDict.copy", + 
"google.generativeai.types.SafetySettingDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.SafetySettingDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.SafetySettingDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.SafetySettingDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.SafetySettingDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.SafetySettingDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.SafetySettingDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.SafetySettingDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.AuthorError.__init__", + "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.AuthorError.__new__", + "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.AuthorError.add_note", + "google.generativeai.types.StopCandidateException.args": 
"google.generativeai.types.AuthorError.args", + "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.AuthorError.with_traceback", + "google.generativeai.types.Tool.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", + "google.generativeai.types.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.Tool.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.ToolDict.__contains__": "google.generativeai.types.BlobDict.__contains__", + "google.generativeai.types.ToolDict.__eq__": "google.generativeai.types.BlobDict.__eq__", + "google.generativeai.types.ToolDict.__ge__": "google.generativeai.types.BlobDict.__ge__", + "google.generativeai.types.ToolDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", + "google.generativeai.types.ToolDict.__gt__": "google.generativeai.types.BlobDict.__gt__", + "google.generativeai.types.ToolDict.__init__": "google.generativeai.types.BlobDict.__init__", + "google.generativeai.types.ToolDict.__iter__": "google.generativeai.types.BlobDict.__iter__", + "google.generativeai.types.ToolDict.__le__": "google.generativeai.types.BlobDict.__le__", + "google.generativeai.types.ToolDict.__len__": "google.generativeai.types.BlobDict.__len__", + "google.generativeai.types.ToolDict.__lt__": "google.generativeai.types.BlobDict.__lt__", + "google.generativeai.types.ToolDict.__ne__": 
"google.generativeai.types.BlobDict.__ne__", + "google.generativeai.types.ToolDict.__new__": "google.generativeai.types.BlobDict.__new__", + "google.generativeai.types.ToolDict.__or__": "google.generativeai.types.BlobDict.__or__", + "google.generativeai.types.ToolDict.__ror__": "google.generativeai.types.BlobDict.__ror__", + "google.generativeai.types.ToolDict.clear": "google.generativeai.types.BlobDict.clear", + "google.generativeai.types.ToolDict.copy": "google.generativeai.types.BlobDict.copy", + "google.generativeai.types.ToolDict.get": "google.generativeai.types.BlobDict.get", + "google.generativeai.types.ToolDict.items": "google.generativeai.types.BlobDict.items", + "google.generativeai.types.ToolDict.keys": "google.generativeai.types.BlobDict.keys", + "google.generativeai.types.ToolDict.pop": "google.generativeai.types.BlobDict.pop", + "google.generativeai.types.ToolDict.popitem": "google.generativeai.types.BlobDict.popitem", + "google.generativeai.types.ToolDict.setdefault": "google.generativeai.types.BlobDict.setdefault", + "google.generativeai.types.ToolDict.update": "google.generativeai.types.BlobDict.update", + "google.generativeai.types.ToolDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", + "google.generativeai.types.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", + "google.generativeai.types.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.types.TunedModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", + "google.generativeai.types.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.types.TunedModelState.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.types.TunedModelState.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.types.TunedModelState.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.types.TunedModelState.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + "google.generativeai.types.TunedModelState.__contains__": "google.generativeai.protos.TunedModel.State.__contains__", + "google.generativeai.types.TunedModelState.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.types.TunedModelState.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.types.TunedModelState.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.types.TunedModelState.__getitem__": "google.generativeai.protos.TunedModel.State.__getitem__", + "google.generativeai.types.TunedModelState.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.types.TunedModelState.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.types.TunedModelState.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.types.TunedModelState.__iter__": "google.generativeai.protos.TunedModel.State.__iter__", + "google.generativeai.types.TunedModelState.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.types.TunedModelState.__len__": "google.generativeai.protos.TunedModel.State.__len__", + "google.generativeai.types.TunedModelState.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.types.TunedModelState.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + 
"google.generativeai.types.TunedModelState.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.types.TunedModelState.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.types.TunedModelState.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.types.TunedModelState.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.types.TunedModelState.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.types.TunedModelState.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + "google.generativeai.types.TunedModelState.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.types.TunedModelState.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.types.TunedModelState.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.types.TunedModelState.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.types.TunedModelState.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.types.TunedModelState.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.types.TunedModelState.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.types.TunedModelState.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.types.TunedModelState.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.types.TunedModelState.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.types.TunedModelState.__rrshift__": 
"google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.types.TunedModelState.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.types.TunedModelState.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.types.TunedModelState.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.types.TunedModelState.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + "google.generativeai.types.TunedModelState.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.types.TunedModelState.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.types.TunedModelState.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.types.TunedModelState.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.types.TunedModelState.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.types.TunedModelState.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.types.TunedModelState.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.types.TunedModelState.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.types.TunedModelState.from_bytes": "google.generativeai.protos.TunedModel.State.from_bytes", + "google.generativeai.types.TunedModelState.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.TunedModelState.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.types.TunedModelState.real": "google.generativeai.protos.Candidate.FinishReason.real", + 
"google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes" + }, + "is_fragment": { + "google.generativeai": false, + "google.generativeai.ChatSession": false, + "google.generativeai.ChatSession.__eq__": true, + "google.generativeai.ChatSession.__ge__": true, + "google.generativeai.ChatSession.__gt__": true, + "google.generativeai.ChatSession.__init__": true, + "google.generativeai.ChatSession.__le__": true, + "google.generativeai.ChatSession.__lt__": true, + "google.generativeai.ChatSession.__ne__": true, + "google.generativeai.ChatSession.__new__": true, + "google.generativeai.ChatSession.history": true, + "google.generativeai.ChatSession.last": true, + "google.generativeai.ChatSession.rewind": true, + "google.generativeai.ChatSession.send_message": true, + "google.generativeai.ChatSession.send_message_async": true, + "google.generativeai.GenerationConfig": false, + "google.generativeai.GenerationConfig.__eq__": true, + "google.generativeai.GenerationConfig.__ge__": true, + "google.generativeai.GenerationConfig.__gt__": true, + "google.generativeai.GenerationConfig.__init__": true, + "google.generativeai.GenerationConfig.__le__": true, + "google.generativeai.GenerationConfig.__lt__": true, + "google.generativeai.GenerationConfig.__ne__": true, + "google.generativeai.GenerationConfig.__new__": true, + "google.generativeai.GenerationConfig.candidate_count": true, + "google.generativeai.GenerationConfig.max_output_tokens": true, + "google.generativeai.GenerationConfig.response_mime_type": true, + "google.generativeai.GenerationConfig.response_schema": true, + "google.generativeai.GenerationConfig.stop_sequences": true, + "google.generativeai.GenerationConfig.temperature": true, + "google.generativeai.GenerationConfig.top_k": true, + "google.generativeai.GenerationConfig.top_p": true, + "google.generativeai.GenerativeModel": false, + "google.generativeai.GenerativeModel.__eq__": true, + 
"google.generativeai.GenerativeModel.__ge__": true, + "google.generativeai.GenerativeModel.__gt__": true, + "google.generativeai.GenerativeModel.__init__": true, + "google.generativeai.GenerativeModel.__le__": true, + "google.generativeai.GenerativeModel.__lt__": true, + "google.generativeai.GenerativeModel.__ne__": true, + "google.generativeai.GenerativeModel.__new__": true, + "google.generativeai.GenerativeModel.cached_content": true, + "google.generativeai.GenerativeModel.count_tokens": true, + "google.generativeai.GenerativeModel.count_tokens_async": true, + "google.generativeai.GenerativeModel.from_cached_content": true, + "google.generativeai.GenerativeModel.generate_content": true, + "google.generativeai.GenerativeModel.generate_content_async": true, + "google.generativeai.GenerativeModel.model_name": true, + "google.generativeai.GenerativeModel.start_chat": true, + "google.generativeai.__version__": true, + "google.generativeai.annotations": true, + "google.generativeai.chat": false, + "google.generativeai.chat_async": false, + "google.generativeai.configure": false, + "google.generativeai.count_message_tokens": false, + "google.generativeai.count_text_tokens": false, + "google.generativeai.create_tuned_model": false, + "google.generativeai.delete_file": false, + "google.generativeai.delete_tuned_model": false, + "google.generativeai.embed_content": false, + "google.generativeai.embed_content_async": false, + "google.generativeai.generate_embeddings": false, + "google.generativeai.generate_text": false, + "google.generativeai.get_base_model": false, + "google.generativeai.get_file": false, + "google.generativeai.get_model": false, + "google.generativeai.get_operation": false, + "google.generativeai.get_tuned_model": false, + "google.generativeai.list_files": false, + "google.generativeai.list_models": false, + "google.generativeai.list_operations": false, + "google.generativeai.list_tuned_models": false, + "google.generativeai.protos": false, + 
"google.generativeai.protos.AttributionSourceId": false, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId": false, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__call__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__or__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ror__": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.mro": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.part_index": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.passage_id": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": true, + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": 
true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": false, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__call__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__or__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ror__": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.chunk": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.mro": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.source": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": true, + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": true, + 
"google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": true, + "google.generativeai.protos.AttributionSourceId.__call__": true, + "google.generativeai.protos.AttributionSourceId.__eq__": true, + "google.generativeai.protos.AttributionSourceId.__ge__": true, + "google.generativeai.protos.AttributionSourceId.__gt__": true, + "google.generativeai.protos.AttributionSourceId.__init__": true, + "google.generativeai.protos.AttributionSourceId.__le__": true, + "google.generativeai.protos.AttributionSourceId.__lt__": true, + "google.generativeai.protos.AttributionSourceId.__ne__": true, + "google.generativeai.protos.AttributionSourceId.__new__": true, + "google.generativeai.protos.AttributionSourceId.__or__": true, + "google.generativeai.protos.AttributionSourceId.__ror__": true, + "google.generativeai.protos.AttributionSourceId.copy_from": true, + "google.generativeai.protos.AttributionSourceId.deserialize": true, + "google.generativeai.protos.AttributionSourceId.from_json": true, + "google.generativeai.protos.AttributionSourceId.grounding_passage": true, + "google.generativeai.protos.AttributionSourceId.mro": true, + "google.generativeai.protos.AttributionSourceId.pb": true, + "google.generativeai.protos.AttributionSourceId.semantic_retriever_chunk": true, + "google.generativeai.protos.AttributionSourceId.serialize": true, + "google.generativeai.protos.AttributionSourceId.to_dict": true, + "google.generativeai.protos.AttributionSourceId.to_json": true, + "google.generativeai.protos.AttributionSourceId.wrap": true, + "google.generativeai.protos.BatchCreateChunksRequest": false, + "google.generativeai.protos.BatchCreateChunksRequest.__call__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__eq__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__ge__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__gt__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__init__": true, + 
"google.generativeai.protos.BatchCreateChunksRequest.__le__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__lt__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__ne__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__new__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__or__": true, + "google.generativeai.protos.BatchCreateChunksRequest.__ror__": true, + "google.generativeai.protos.BatchCreateChunksRequest.copy_from": true, + "google.generativeai.protos.BatchCreateChunksRequest.deserialize": true, + "google.generativeai.protos.BatchCreateChunksRequest.from_json": true, + "google.generativeai.protos.BatchCreateChunksRequest.mro": true, + "google.generativeai.protos.BatchCreateChunksRequest.parent": true, + "google.generativeai.protos.BatchCreateChunksRequest.pb": true, + "google.generativeai.protos.BatchCreateChunksRequest.requests": true, + "google.generativeai.protos.BatchCreateChunksRequest.serialize": true, + "google.generativeai.protos.BatchCreateChunksRequest.to_dict": true, + "google.generativeai.protos.BatchCreateChunksRequest.to_json": true, + "google.generativeai.protos.BatchCreateChunksRequest.wrap": true, + "google.generativeai.protos.BatchCreateChunksResponse": false, + "google.generativeai.protos.BatchCreateChunksResponse.__call__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__eq__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__ge__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__gt__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__init__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__le__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__lt__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__ne__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__new__": true, + "google.generativeai.protos.BatchCreateChunksResponse.__or__": true, + 
"google.generativeai.protos.BatchCreateChunksResponse.__ror__": true, + "google.generativeai.protos.BatchCreateChunksResponse.chunks": true, + "google.generativeai.protos.BatchCreateChunksResponse.copy_from": true, + "google.generativeai.protos.BatchCreateChunksResponse.deserialize": true, + "google.generativeai.protos.BatchCreateChunksResponse.from_json": true, + "google.generativeai.protos.BatchCreateChunksResponse.mro": true, + "google.generativeai.protos.BatchCreateChunksResponse.pb": true, + "google.generativeai.protos.BatchCreateChunksResponse.serialize": true, + "google.generativeai.protos.BatchCreateChunksResponse.to_dict": true, + "google.generativeai.protos.BatchCreateChunksResponse.to_json": true, + "google.generativeai.protos.BatchCreateChunksResponse.wrap": true, + "google.generativeai.protos.BatchDeleteChunksRequest": false, + "google.generativeai.protos.BatchDeleteChunksRequest.__call__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__init__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__le__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__new__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__or__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.__ror__": true, + "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": true, + "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": true, + "google.generativeai.protos.BatchDeleteChunksRequest.from_json": true, + "google.generativeai.protos.BatchDeleteChunksRequest.mro": true, + "google.generativeai.protos.BatchDeleteChunksRequest.parent": true, + 
"google.generativeai.protos.BatchDeleteChunksRequest.pb": true, + "google.generativeai.protos.BatchDeleteChunksRequest.requests": true, + "google.generativeai.protos.BatchDeleteChunksRequest.serialize": true, + "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": true, + "google.generativeai.protos.BatchDeleteChunksRequest.to_json": true, + "google.generativeai.protos.BatchDeleteChunksRequest.wrap": true, + "google.generativeai.protos.BatchEmbedContentsRequest": false, + "google.generativeai.protos.BatchEmbedContentsRequest.__call__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__init__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__le__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__new__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__or__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.__ror__": true, + "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": true, + "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": true, + "google.generativeai.protos.BatchEmbedContentsRequest.from_json": true, + "google.generativeai.protos.BatchEmbedContentsRequest.model": true, + "google.generativeai.protos.BatchEmbedContentsRequest.mro": true, + "google.generativeai.protos.BatchEmbedContentsRequest.pb": true, + "google.generativeai.protos.BatchEmbedContentsRequest.requests": true, + "google.generativeai.protos.BatchEmbedContentsRequest.serialize": true, + "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": true, + "google.generativeai.protos.BatchEmbedContentsRequest.to_json": true, + 
"google.generativeai.protos.BatchEmbedContentsRequest.wrap": true, + "google.generativeai.protos.BatchEmbedContentsResponse": false, + "google.generativeai.protos.BatchEmbedContentsResponse.__call__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__init__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__le__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__new__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__or__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.__ror__": true, + "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": true, + "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": true, + "google.generativeai.protos.BatchEmbedContentsResponse.embeddings": true, + "google.generativeai.protos.BatchEmbedContentsResponse.from_json": true, + "google.generativeai.protos.BatchEmbedContentsResponse.mro": true, + "google.generativeai.protos.BatchEmbedContentsResponse.pb": true, + "google.generativeai.protos.BatchEmbedContentsResponse.serialize": true, + "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": true, + "google.generativeai.protos.BatchEmbedContentsResponse.to_json": true, + "google.generativeai.protos.BatchEmbedContentsResponse.wrap": true, + "google.generativeai.protos.BatchEmbedTextRequest": false, + "google.generativeai.protos.BatchEmbedTextRequest.__call__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__eq__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__ge__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__gt__": true, + 
"google.generativeai.protos.BatchEmbedTextRequest.__init__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__le__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__lt__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__ne__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__new__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__or__": true, + "google.generativeai.protos.BatchEmbedTextRequest.__ror__": true, + "google.generativeai.protos.BatchEmbedTextRequest.copy_from": true, + "google.generativeai.protos.BatchEmbedTextRequest.deserialize": true, + "google.generativeai.protos.BatchEmbedTextRequest.from_json": true, + "google.generativeai.protos.BatchEmbedTextRequest.model": true, + "google.generativeai.protos.BatchEmbedTextRequest.mro": true, + "google.generativeai.protos.BatchEmbedTextRequest.pb": true, + "google.generativeai.protos.BatchEmbedTextRequest.requests": true, + "google.generativeai.protos.BatchEmbedTextRequest.serialize": true, + "google.generativeai.protos.BatchEmbedTextRequest.texts": true, + "google.generativeai.protos.BatchEmbedTextRequest.to_dict": true, + "google.generativeai.protos.BatchEmbedTextRequest.to_json": true, + "google.generativeai.protos.BatchEmbedTextRequest.wrap": true, + "google.generativeai.protos.BatchEmbedTextResponse": false, + "google.generativeai.protos.BatchEmbedTextResponse.__call__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__eq__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__ge__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__gt__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__init__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__le__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__lt__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__ne__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__new__": true, + 
"google.generativeai.protos.BatchEmbedTextResponse.__or__": true, + "google.generativeai.protos.BatchEmbedTextResponse.__ror__": true, + "google.generativeai.protos.BatchEmbedTextResponse.copy_from": true, + "google.generativeai.protos.BatchEmbedTextResponse.deserialize": true, + "google.generativeai.protos.BatchEmbedTextResponse.embeddings": true, + "google.generativeai.protos.BatchEmbedTextResponse.from_json": true, + "google.generativeai.protos.BatchEmbedTextResponse.mro": true, + "google.generativeai.protos.BatchEmbedTextResponse.pb": true, + "google.generativeai.protos.BatchEmbedTextResponse.serialize": true, + "google.generativeai.protos.BatchEmbedTextResponse.to_dict": true, + "google.generativeai.protos.BatchEmbedTextResponse.to_json": true, + "google.generativeai.protos.BatchEmbedTextResponse.wrap": true, + "google.generativeai.protos.BatchUpdateChunksRequest": false, + "google.generativeai.protos.BatchUpdateChunksRequest.__call__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__init__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__le__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__new__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__or__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.__ror__": true, + "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": true, + "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": true, + "google.generativeai.protos.BatchUpdateChunksRequest.from_json": true, + "google.generativeai.protos.BatchUpdateChunksRequest.mro": true, + 
"google.generativeai.protos.BatchUpdateChunksRequest.parent": true, + "google.generativeai.protos.BatchUpdateChunksRequest.pb": true, + "google.generativeai.protos.BatchUpdateChunksRequest.requests": true, + "google.generativeai.protos.BatchUpdateChunksRequest.serialize": true, + "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": true, + "google.generativeai.protos.BatchUpdateChunksRequest.to_json": true, + "google.generativeai.protos.BatchUpdateChunksRequest.wrap": true, + "google.generativeai.protos.BatchUpdateChunksResponse": false, + "google.generativeai.protos.BatchUpdateChunksResponse.__call__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__init__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__le__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__new__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__or__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.__ror__": true, + "google.generativeai.protos.BatchUpdateChunksResponse.chunks": true, + "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": true, + "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": true, + "google.generativeai.protos.BatchUpdateChunksResponse.from_json": true, + "google.generativeai.protos.BatchUpdateChunksResponse.mro": true, + "google.generativeai.protos.BatchUpdateChunksResponse.pb": true, + "google.generativeai.protos.BatchUpdateChunksResponse.serialize": true, + "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": true, + "google.generativeai.protos.BatchUpdateChunksResponse.to_json": true, + 
"google.generativeai.protos.BatchUpdateChunksResponse.wrap": true, + "google.generativeai.protos.Blob": false, + "google.generativeai.protos.Blob.__call__": true, + "google.generativeai.protos.Blob.__eq__": true, + "google.generativeai.protos.Blob.__ge__": true, + "google.generativeai.protos.Blob.__gt__": true, + "google.generativeai.protos.Blob.__init__": true, + "google.generativeai.protos.Blob.__le__": true, + "google.generativeai.protos.Blob.__lt__": true, + "google.generativeai.protos.Blob.__ne__": true, + "google.generativeai.protos.Blob.__new__": true, + "google.generativeai.protos.Blob.__or__": true, + "google.generativeai.protos.Blob.__ror__": true, + "google.generativeai.protos.Blob.copy_from": true, + "google.generativeai.protos.Blob.data": true, + "google.generativeai.protos.Blob.deserialize": true, + "google.generativeai.protos.Blob.from_json": true, + "google.generativeai.protos.Blob.mime_type": true, + "google.generativeai.protos.Blob.mro": true, + "google.generativeai.protos.Blob.pb": true, + "google.generativeai.protos.Blob.serialize": true, + "google.generativeai.protos.Blob.to_dict": true, + "google.generativeai.protos.Blob.to_json": true, + "google.generativeai.protos.Blob.wrap": true, + "google.generativeai.protos.CachedContent": false, + "google.generativeai.protos.CachedContent.UsageMetadata": false, + "google.generativeai.protos.CachedContent.UsageMetadata.__call__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__init__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__le__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": true, + 
"google.generativeai.protos.CachedContent.UsageMetadata.__new__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__or__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.__ror__": true, + "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": true, + "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": true, + "google.generativeai.protos.CachedContent.UsageMetadata.from_json": true, + "google.generativeai.protos.CachedContent.UsageMetadata.mro": true, + "google.generativeai.protos.CachedContent.UsageMetadata.pb": true, + "google.generativeai.protos.CachedContent.UsageMetadata.serialize": true, + "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": true, + "google.generativeai.protos.CachedContent.UsageMetadata.to_json": true, + "google.generativeai.protos.CachedContent.UsageMetadata.total_token_count": true, + "google.generativeai.protos.CachedContent.UsageMetadata.wrap": true, + "google.generativeai.protos.CachedContent.__call__": true, + "google.generativeai.protos.CachedContent.__eq__": true, + "google.generativeai.protos.CachedContent.__ge__": true, + "google.generativeai.protos.CachedContent.__gt__": true, + "google.generativeai.protos.CachedContent.__init__": true, + "google.generativeai.protos.CachedContent.__le__": true, + "google.generativeai.protos.CachedContent.__lt__": true, + "google.generativeai.protos.CachedContent.__ne__": true, + "google.generativeai.protos.CachedContent.__new__": true, + "google.generativeai.protos.CachedContent.__or__": true, + "google.generativeai.protos.CachedContent.__ror__": true, + "google.generativeai.protos.CachedContent.contents": true, + "google.generativeai.protos.CachedContent.copy_from": true, + "google.generativeai.protos.CachedContent.create_time": true, + "google.generativeai.protos.CachedContent.deserialize": true, + "google.generativeai.protos.CachedContent.display_name": true, + "google.generativeai.protos.CachedContent.expire_time": 
true, + "google.generativeai.protos.CachedContent.from_json": true, + "google.generativeai.protos.CachedContent.model": true, + "google.generativeai.protos.CachedContent.mro": true, + "google.generativeai.protos.CachedContent.name": true, + "google.generativeai.protos.CachedContent.pb": true, + "google.generativeai.protos.CachedContent.serialize": true, + "google.generativeai.protos.CachedContent.system_instruction": true, + "google.generativeai.protos.CachedContent.to_dict": true, + "google.generativeai.protos.CachedContent.to_json": true, + "google.generativeai.protos.CachedContent.tool_config": true, + "google.generativeai.protos.CachedContent.tools": true, + "google.generativeai.protos.CachedContent.ttl": true, + "google.generativeai.protos.CachedContent.update_time": true, + "google.generativeai.protos.CachedContent.usage_metadata": true, + "google.generativeai.protos.CachedContent.wrap": true, + "google.generativeai.protos.Candidate": false, + "google.generativeai.protos.Candidate.FinishReason": false, + "google.generativeai.protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED": true, + "google.generativeai.protos.Candidate.FinishReason.MAX_TOKENS": true, + "google.generativeai.protos.Candidate.FinishReason.OTHER": true, + "google.generativeai.protos.Candidate.FinishReason.RECITATION": true, + "google.generativeai.protos.Candidate.FinishReason.SAFETY": true, + "google.generativeai.protos.Candidate.FinishReason.STOP": true, + "google.generativeai.protos.Candidate.FinishReason.__abs__": true, + "google.generativeai.protos.Candidate.FinishReason.__add__": true, + "google.generativeai.protos.Candidate.FinishReason.__and__": true, + "google.generativeai.protos.Candidate.FinishReason.__bool__": true, + "google.generativeai.protos.Candidate.FinishReason.__contains__": true, + "google.generativeai.protos.Candidate.FinishReason.__eq__": true, + "google.generativeai.protos.Candidate.FinishReason.__floordiv__": true, + 
"google.generativeai.protos.Candidate.FinishReason.__ge__": true, + "google.generativeai.protos.Candidate.FinishReason.__getitem__": true, + "google.generativeai.protos.Candidate.FinishReason.__gt__": true, + "google.generativeai.protos.Candidate.FinishReason.__init__": true, + "google.generativeai.protos.Candidate.FinishReason.__invert__": true, + "google.generativeai.protos.Candidate.FinishReason.__iter__": true, + "google.generativeai.protos.Candidate.FinishReason.__le__": true, + "google.generativeai.protos.Candidate.FinishReason.__len__": true, + "google.generativeai.protos.Candidate.FinishReason.__lshift__": true, + "google.generativeai.protos.Candidate.FinishReason.__lt__": true, + "google.generativeai.protos.Candidate.FinishReason.__mod__": true, + "google.generativeai.protos.Candidate.FinishReason.__mul__": true, + "google.generativeai.protos.Candidate.FinishReason.__ne__": true, + "google.generativeai.protos.Candidate.FinishReason.__neg__": true, + "google.generativeai.protos.Candidate.FinishReason.__new__": true, + "google.generativeai.protos.Candidate.FinishReason.__or__": true, + "google.generativeai.protos.Candidate.FinishReason.__pos__": true, + "google.generativeai.protos.Candidate.FinishReason.__pow__": true, + "google.generativeai.protos.Candidate.FinishReason.__radd__": true, + "google.generativeai.protos.Candidate.FinishReason.__rand__": true, + "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__": true, + "google.generativeai.protos.Candidate.FinishReason.__rlshift__": true, + "google.generativeai.protos.Candidate.FinishReason.__rmod__": true, + "google.generativeai.protos.Candidate.FinishReason.__rmul__": true, + "google.generativeai.protos.Candidate.FinishReason.__ror__": true, + "google.generativeai.protos.Candidate.FinishReason.__rpow__": true, + "google.generativeai.protos.Candidate.FinishReason.__rrshift__": true, + "google.generativeai.protos.Candidate.FinishReason.__rshift__": true, + 
"google.generativeai.protos.Candidate.FinishReason.__rsub__": true, + "google.generativeai.protos.Candidate.FinishReason.__rtruediv__": true, + "google.generativeai.protos.Candidate.FinishReason.__rxor__": true, + "google.generativeai.protos.Candidate.FinishReason.__sub__": true, + "google.generativeai.protos.Candidate.FinishReason.__truediv__": true, + "google.generativeai.protos.Candidate.FinishReason.__xor__": true, + "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio": true, + "google.generativeai.protos.Candidate.FinishReason.bit_count": true, + "google.generativeai.protos.Candidate.FinishReason.bit_length": true, + "google.generativeai.protos.Candidate.FinishReason.conjugate": true, + "google.generativeai.protos.Candidate.FinishReason.denominator": true, + "google.generativeai.protos.Candidate.FinishReason.from_bytes": true, + "google.generativeai.protos.Candidate.FinishReason.imag": true, + "google.generativeai.protos.Candidate.FinishReason.numerator": true, + "google.generativeai.protos.Candidate.FinishReason.real": true, + "google.generativeai.protos.Candidate.FinishReason.to_bytes": true, + "google.generativeai.protos.Candidate.__call__": true, + "google.generativeai.protos.Candidate.__eq__": true, + "google.generativeai.protos.Candidate.__ge__": true, + "google.generativeai.protos.Candidate.__gt__": true, + "google.generativeai.protos.Candidate.__init__": true, + "google.generativeai.protos.Candidate.__le__": true, + "google.generativeai.protos.Candidate.__lt__": true, + "google.generativeai.protos.Candidate.__ne__": true, + "google.generativeai.protos.Candidate.__new__": true, + "google.generativeai.protos.Candidate.__or__": true, + "google.generativeai.protos.Candidate.__ror__": true, + "google.generativeai.protos.Candidate.citation_metadata": true, + "google.generativeai.protos.Candidate.content": true, + "google.generativeai.protos.Candidate.copy_from": true, + "google.generativeai.protos.Candidate.deserialize": true, + 
"google.generativeai.protos.Candidate.finish_reason": true, + "google.generativeai.protos.Candidate.from_json": true, + "google.generativeai.protos.Candidate.grounding_attributions": true, + "google.generativeai.protos.Candidate.index": true, + "google.generativeai.protos.Candidate.mro": true, + "google.generativeai.protos.Candidate.pb": true, + "google.generativeai.protos.Candidate.safety_ratings": true, + "google.generativeai.protos.Candidate.serialize": true, + "google.generativeai.protos.Candidate.to_dict": true, + "google.generativeai.protos.Candidate.to_json": true, + "google.generativeai.protos.Candidate.token_count": true, + "google.generativeai.protos.Candidate.wrap": true, + "google.generativeai.protos.Chunk": false, + "google.generativeai.protos.Chunk.State": false, + "google.generativeai.protos.Chunk.State.STATE_ACTIVE": true, + "google.generativeai.protos.Chunk.State.STATE_FAILED": true, + "google.generativeai.protos.Chunk.State.STATE_PENDING_PROCESSING": true, + "google.generativeai.protos.Chunk.State.STATE_UNSPECIFIED": true, + "google.generativeai.protos.Chunk.State.__abs__": true, + "google.generativeai.protos.Chunk.State.__add__": true, + "google.generativeai.protos.Chunk.State.__and__": true, + "google.generativeai.protos.Chunk.State.__bool__": true, + "google.generativeai.protos.Chunk.State.__contains__": true, + "google.generativeai.protos.Chunk.State.__eq__": true, + "google.generativeai.protos.Chunk.State.__floordiv__": true, + "google.generativeai.protos.Chunk.State.__ge__": true, + "google.generativeai.protos.Chunk.State.__getitem__": true, + "google.generativeai.protos.Chunk.State.__gt__": true, + "google.generativeai.protos.Chunk.State.__init__": true, + "google.generativeai.protos.Chunk.State.__invert__": true, + "google.generativeai.protos.Chunk.State.__iter__": true, + "google.generativeai.protos.Chunk.State.__le__": true, + "google.generativeai.protos.Chunk.State.__len__": true, + "google.generativeai.protos.Chunk.State.__lshift__": 
true, + "google.generativeai.protos.Chunk.State.__lt__": true, + "google.generativeai.protos.Chunk.State.__mod__": true, + "google.generativeai.protos.Chunk.State.__mul__": true, + "google.generativeai.protos.Chunk.State.__ne__": true, + "google.generativeai.protos.Chunk.State.__neg__": true, + "google.generativeai.protos.Chunk.State.__new__": true, + "google.generativeai.protos.Chunk.State.__or__": true, + "google.generativeai.protos.Chunk.State.__pos__": true, + "google.generativeai.protos.Chunk.State.__pow__": true, + "google.generativeai.protos.Chunk.State.__radd__": true, + "google.generativeai.protos.Chunk.State.__rand__": true, + "google.generativeai.protos.Chunk.State.__rfloordiv__": true, + "google.generativeai.protos.Chunk.State.__rlshift__": true, + "google.generativeai.protos.Chunk.State.__rmod__": true, + "google.generativeai.protos.Chunk.State.__rmul__": true, + "google.generativeai.protos.Chunk.State.__ror__": true, + "google.generativeai.protos.Chunk.State.__rpow__": true, + "google.generativeai.protos.Chunk.State.__rrshift__": true, + "google.generativeai.protos.Chunk.State.__rshift__": true, + "google.generativeai.protos.Chunk.State.__rsub__": true, + "google.generativeai.protos.Chunk.State.__rtruediv__": true, + "google.generativeai.protos.Chunk.State.__rxor__": true, + "google.generativeai.protos.Chunk.State.__sub__": true, + "google.generativeai.protos.Chunk.State.__truediv__": true, + "google.generativeai.protos.Chunk.State.__xor__": true, + "google.generativeai.protos.Chunk.State.as_integer_ratio": true, + "google.generativeai.protos.Chunk.State.bit_count": true, + "google.generativeai.protos.Chunk.State.bit_length": true, + "google.generativeai.protos.Chunk.State.conjugate": true, + "google.generativeai.protos.Chunk.State.denominator": true, + "google.generativeai.protos.Chunk.State.from_bytes": true, + "google.generativeai.protos.Chunk.State.imag": true, + "google.generativeai.protos.Chunk.State.numerator": true, + 
"google.generativeai.protos.Chunk.State.real": true, + "google.generativeai.protos.Chunk.State.to_bytes": true, + "google.generativeai.protos.Chunk.__call__": true, + "google.generativeai.protos.Chunk.__eq__": true, + "google.generativeai.protos.Chunk.__ge__": true, + "google.generativeai.protos.Chunk.__gt__": true, + "google.generativeai.protos.Chunk.__init__": true, + "google.generativeai.protos.Chunk.__le__": true, + "google.generativeai.protos.Chunk.__lt__": true, + "google.generativeai.protos.Chunk.__ne__": true, + "google.generativeai.protos.Chunk.__new__": true, + "google.generativeai.protos.Chunk.__or__": true, + "google.generativeai.protos.Chunk.__ror__": true, + "google.generativeai.protos.Chunk.copy_from": true, + "google.generativeai.protos.Chunk.create_time": true, + "google.generativeai.protos.Chunk.custom_metadata": true, + "google.generativeai.protos.Chunk.data": true, + "google.generativeai.protos.Chunk.deserialize": true, + "google.generativeai.protos.Chunk.from_json": true, + "google.generativeai.protos.Chunk.mro": true, + "google.generativeai.protos.Chunk.name": true, + "google.generativeai.protos.Chunk.pb": true, + "google.generativeai.protos.Chunk.serialize": true, + "google.generativeai.protos.Chunk.state": true, + "google.generativeai.protos.Chunk.to_dict": true, + "google.generativeai.protos.Chunk.to_json": true, + "google.generativeai.protos.Chunk.update_time": true, + "google.generativeai.protos.Chunk.wrap": true, + "google.generativeai.protos.ChunkData": false, + "google.generativeai.protos.ChunkData.__call__": true, + "google.generativeai.protos.ChunkData.__eq__": true, + "google.generativeai.protos.ChunkData.__ge__": true, + "google.generativeai.protos.ChunkData.__gt__": true, + "google.generativeai.protos.ChunkData.__init__": true, + "google.generativeai.protos.ChunkData.__le__": true, + "google.generativeai.protos.ChunkData.__lt__": true, + "google.generativeai.protos.ChunkData.__ne__": true, + 
"google.generativeai.protos.ChunkData.__new__": true, + "google.generativeai.protos.ChunkData.__or__": true, + "google.generativeai.protos.ChunkData.__ror__": true, + "google.generativeai.protos.ChunkData.copy_from": true, + "google.generativeai.protos.ChunkData.deserialize": true, + "google.generativeai.protos.ChunkData.from_json": true, + "google.generativeai.protos.ChunkData.mro": true, + "google.generativeai.protos.ChunkData.pb": true, + "google.generativeai.protos.ChunkData.serialize": true, + "google.generativeai.protos.ChunkData.string_value": true, + "google.generativeai.protos.ChunkData.to_dict": true, + "google.generativeai.protos.ChunkData.to_json": true, + "google.generativeai.protos.ChunkData.wrap": true, + "google.generativeai.protos.CitationMetadata": false, + "google.generativeai.protos.CitationMetadata.__call__": true, + "google.generativeai.protos.CitationMetadata.__eq__": true, + "google.generativeai.protos.CitationMetadata.__ge__": true, + "google.generativeai.protos.CitationMetadata.__gt__": true, + "google.generativeai.protos.CitationMetadata.__init__": true, + "google.generativeai.protos.CitationMetadata.__le__": true, + "google.generativeai.protos.CitationMetadata.__lt__": true, + "google.generativeai.protos.CitationMetadata.__ne__": true, + "google.generativeai.protos.CitationMetadata.__new__": true, + "google.generativeai.protos.CitationMetadata.__or__": true, + "google.generativeai.protos.CitationMetadata.__ror__": true, + "google.generativeai.protos.CitationMetadata.citation_sources": true, + "google.generativeai.protos.CitationMetadata.copy_from": true, + "google.generativeai.protos.CitationMetadata.deserialize": true, + "google.generativeai.protos.CitationMetadata.from_json": true, + "google.generativeai.protos.CitationMetadata.mro": true, + "google.generativeai.protos.CitationMetadata.pb": true, + "google.generativeai.protos.CitationMetadata.serialize": true, + "google.generativeai.protos.CitationMetadata.to_dict": true, + 
"google.generativeai.protos.CitationMetadata.to_json": true, + "google.generativeai.protos.CitationMetadata.wrap": true, + "google.generativeai.protos.CitationSource": false, + "google.generativeai.protos.CitationSource.__call__": true, + "google.generativeai.protos.CitationSource.__eq__": true, + "google.generativeai.protos.CitationSource.__ge__": true, + "google.generativeai.protos.CitationSource.__gt__": true, + "google.generativeai.protos.CitationSource.__init__": true, + "google.generativeai.protos.CitationSource.__le__": true, + "google.generativeai.protos.CitationSource.__lt__": true, + "google.generativeai.protos.CitationSource.__ne__": true, + "google.generativeai.protos.CitationSource.__new__": true, + "google.generativeai.protos.CitationSource.__or__": true, + "google.generativeai.protos.CitationSource.__ror__": true, + "google.generativeai.protos.CitationSource.copy_from": true, + "google.generativeai.protos.CitationSource.deserialize": true, + "google.generativeai.protos.CitationSource.end_index": true, + "google.generativeai.protos.CitationSource.from_json": true, + "google.generativeai.protos.CitationSource.license_": true, + "google.generativeai.protos.CitationSource.mro": true, + "google.generativeai.protos.CitationSource.pb": true, + "google.generativeai.protos.CitationSource.serialize": true, + "google.generativeai.protos.CitationSource.start_index": true, + "google.generativeai.protos.CitationSource.to_dict": true, + "google.generativeai.protos.CitationSource.to_json": true, + "google.generativeai.protos.CitationSource.uri": true, + "google.generativeai.protos.CitationSource.wrap": true, + "google.generativeai.protos.CodeExecution": false, + "google.generativeai.protos.CodeExecution.__call__": true, + "google.generativeai.protos.CodeExecution.__eq__": true, + "google.generativeai.protos.CodeExecution.__ge__": true, + "google.generativeai.protos.CodeExecution.__gt__": true, + "google.generativeai.protos.CodeExecution.__init__": true, + 
"google.generativeai.protos.CodeExecution.__le__": true, + "google.generativeai.protos.CodeExecution.__lt__": true, + "google.generativeai.protos.CodeExecution.__ne__": true, + "google.generativeai.protos.CodeExecution.__new__": true, + "google.generativeai.protos.CodeExecution.__or__": true, + "google.generativeai.protos.CodeExecution.__ror__": true, + "google.generativeai.protos.CodeExecution.copy_from": true, + "google.generativeai.protos.CodeExecution.deserialize": true, + "google.generativeai.protos.CodeExecution.from_json": true, + "google.generativeai.protos.CodeExecution.mro": true, + "google.generativeai.protos.CodeExecution.pb": true, + "google.generativeai.protos.CodeExecution.serialize": true, + "google.generativeai.protos.CodeExecution.to_dict": true, + "google.generativeai.protos.CodeExecution.to_json": true, + "google.generativeai.protos.CodeExecution.wrap": true, + "google.generativeai.protos.CodeExecutionResult": false, + "google.generativeai.protos.CodeExecutionResult.Outcome": false, + "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_DEADLINE_EXCEEDED": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_FAILED": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_OK": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_UNSPECIFIED": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": true, + 
"google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": true, + 
"google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.from_bytes": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.imag": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.real": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": true, + "google.generativeai.protos.CodeExecutionResult.__call__": true, + "google.generativeai.protos.CodeExecutionResult.__eq__": true, + "google.generativeai.protos.CodeExecutionResult.__ge__": true, + "google.generativeai.protos.CodeExecutionResult.__gt__": true, + "google.generativeai.protos.CodeExecutionResult.__init__": true, + "google.generativeai.protos.CodeExecutionResult.__le__": true, + "google.generativeai.protos.CodeExecutionResult.__lt__": true, + "google.generativeai.protos.CodeExecutionResult.__ne__": true, + "google.generativeai.protos.CodeExecutionResult.__new__": true, + "google.generativeai.protos.CodeExecutionResult.__or__": true, + 
"google.generativeai.protos.CodeExecutionResult.__ror__": true, + "google.generativeai.protos.CodeExecutionResult.copy_from": true, + "google.generativeai.protos.CodeExecutionResult.deserialize": true, + "google.generativeai.protos.CodeExecutionResult.from_json": true, + "google.generativeai.protos.CodeExecutionResult.mro": true, + "google.generativeai.protos.CodeExecutionResult.outcome": true, + "google.generativeai.protos.CodeExecutionResult.output": true, + "google.generativeai.protos.CodeExecutionResult.pb": true, + "google.generativeai.protos.CodeExecutionResult.serialize": true, + "google.generativeai.protos.CodeExecutionResult.to_dict": true, + "google.generativeai.protos.CodeExecutionResult.to_json": true, + "google.generativeai.protos.CodeExecutionResult.wrap": true, + "google.generativeai.protos.Condition": false, + "google.generativeai.protos.Condition.Operator": false, + "google.generativeai.protos.Condition.Operator.EQUAL": true, + "google.generativeai.protos.Condition.Operator.EXCLUDES": true, + "google.generativeai.protos.Condition.Operator.GREATER": true, + "google.generativeai.protos.Condition.Operator.GREATER_EQUAL": true, + "google.generativeai.protos.Condition.Operator.INCLUDES": true, + "google.generativeai.protos.Condition.Operator.LESS": true, + "google.generativeai.protos.Condition.Operator.LESS_EQUAL": true, + "google.generativeai.protos.Condition.Operator.NOT_EQUAL": true, + "google.generativeai.protos.Condition.Operator.OPERATOR_UNSPECIFIED": true, + "google.generativeai.protos.Condition.Operator.__abs__": true, + "google.generativeai.protos.Condition.Operator.__add__": true, + "google.generativeai.protos.Condition.Operator.__and__": true, + "google.generativeai.protos.Condition.Operator.__bool__": true, + "google.generativeai.protos.Condition.Operator.__contains__": true, + "google.generativeai.protos.Condition.Operator.__eq__": true, + "google.generativeai.protos.Condition.Operator.__floordiv__": true, + 
"google.generativeai.protos.Condition.Operator.__ge__": true, + "google.generativeai.protos.Condition.Operator.__getitem__": true, + "google.generativeai.protos.Condition.Operator.__gt__": true, + "google.generativeai.protos.Condition.Operator.__init__": true, + "google.generativeai.protos.Condition.Operator.__invert__": true, + "google.generativeai.protos.Condition.Operator.__iter__": true, + "google.generativeai.protos.Condition.Operator.__le__": true, + "google.generativeai.protos.Condition.Operator.__len__": true, + "google.generativeai.protos.Condition.Operator.__lshift__": true, + "google.generativeai.protos.Condition.Operator.__lt__": true, + "google.generativeai.protos.Condition.Operator.__mod__": true, + "google.generativeai.protos.Condition.Operator.__mul__": true, + "google.generativeai.protos.Condition.Operator.__ne__": true, + "google.generativeai.protos.Condition.Operator.__neg__": true, + "google.generativeai.protos.Condition.Operator.__new__": true, + "google.generativeai.protos.Condition.Operator.__or__": true, + "google.generativeai.protos.Condition.Operator.__pos__": true, + "google.generativeai.protos.Condition.Operator.__pow__": true, + "google.generativeai.protos.Condition.Operator.__radd__": true, + "google.generativeai.protos.Condition.Operator.__rand__": true, + "google.generativeai.protos.Condition.Operator.__rfloordiv__": true, + "google.generativeai.protos.Condition.Operator.__rlshift__": true, + "google.generativeai.protos.Condition.Operator.__rmod__": true, + "google.generativeai.protos.Condition.Operator.__rmul__": true, + "google.generativeai.protos.Condition.Operator.__ror__": true, + "google.generativeai.protos.Condition.Operator.__rpow__": true, + "google.generativeai.protos.Condition.Operator.__rrshift__": true, + "google.generativeai.protos.Condition.Operator.__rshift__": true, + "google.generativeai.protos.Condition.Operator.__rsub__": true, + "google.generativeai.protos.Condition.Operator.__rtruediv__": true, + 
"google.generativeai.protos.Condition.Operator.__rxor__": true, + "google.generativeai.protos.Condition.Operator.__sub__": true, + "google.generativeai.protos.Condition.Operator.__truediv__": true, + "google.generativeai.protos.Condition.Operator.__xor__": true, + "google.generativeai.protos.Condition.Operator.as_integer_ratio": true, + "google.generativeai.protos.Condition.Operator.bit_count": true, + "google.generativeai.protos.Condition.Operator.bit_length": true, + "google.generativeai.protos.Condition.Operator.conjugate": true, + "google.generativeai.protos.Condition.Operator.denominator": true, + "google.generativeai.protos.Condition.Operator.from_bytes": true, + "google.generativeai.protos.Condition.Operator.imag": true, + "google.generativeai.protos.Condition.Operator.numerator": true, + "google.generativeai.protos.Condition.Operator.real": true, + "google.generativeai.protos.Condition.Operator.to_bytes": true, + "google.generativeai.protos.Condition.__call__": true, + "google.generativeai.protos.Condition.__eq__": true, + "google.generativeai.protos.Condition.__ge__": true, + "google.generativeai.protos.Condition.__gt__": true, + "google.generativeai.protos.Condition.__init__": true, + "google.generativeai.protos.Condition.__le__": true, + "google.generativeai.protos.Condition.__lt__": true, + "google.generativeai.protos.Condition.__ne__": true, + "google.generativeai.protos.Condition.__new__": true, + "google.generativeai.protos.Condition.__or__": true, + "google.generativeai.protos.Condition.__ror__": true, + "google.generativeai.protos.Condition.copy_from": true, + "google.generativeai.protos.Condition.deserialize": true, + "google.generativeai.protos.Condition.from_json": true, + "google.generativeai.protos.Condition.mro": true, + "google.generativeai.protos.Condition.numeric_value": true, + "google.generativeai.protos.Condition.operation": true, + "google.generativeai.protos.Condition.pb": true, + "google.generativeai.protos.Condition.serialize": 
true, + "google.generativeai.protos.Condition.string_value": true, + "google.generativeai.protos.Condition.to_dict": true, + "google.generativeai.protos.Condition.to_json": true, + "google.generativeai.protos.Condition.wrap": true, + "google.generativeai.protos.Content": false, + "google.generativeai.protos.Content.__call__": true, + "google.generativeai.protos.Content.__eq__": true, + "google.generativeai.protos.Content.__ge__": true, + "google.generativeai.protos.Content.__gt__": true, + "google.generativeai.protos.Content.__init__": true, + "google.generativeai.protos.Content.__le__": true, + "google.generativeai.protos.Content.__lt__": true, + "google.generativeai.protos.Content.__ne__": true, + "google.generativeai.protos.Content.__new__": true, + "google.generativeai.protos.Content.__or__": true, + "google.generativeai.protos.Content.__ror__": true, + "google.generativeai.protos.Content.copy_from": true, + "google.generativeai.protos.Content.deserialize": true, + "google.generativeai.protos.Content.from_json": true, + "google.generativeai.protos.Content.mro": true, + "google.generativeai.protos.Content.parts": true, + "google.generativeai.protos.Content.pb": true, + "google.generativeai.protos.Content.role": true, + "google.generativeai.protos.Content.serialize": true, + "google.generativeai.protos.Content.to_dict": true, + "google.generativeai.protos.Content.to_json": true, + "google.generativeai.protos.Content.wrap": true, + "google.generativeai.protos.ContentEmbedding": false, + "google.generativeai.protos.ContentEmbedding.__call__": true, + "google.generativeai.protos.ContentEmbedding.__eq__": true, + "google.generativeai.protos.ContentEmbedding.__ge__": true, + "google.generativeai.protos.ContentEmbedding.__gt__": true, + "google.generativeai.protos.ContentEmbedding.__init__": true, + "google.generativeai.protos.ContentEmbedding.__le__": true, + "google.generativeai.protos.ContentEmbedding.__lt__": true, + 
"google.generativeai.protos.ContentEmbedding.__ne__": true, + "google.generativeai.protos.ContentEmbedding.__new__": true, + "google.generativeai.protos.ContentEmbedding.__or__": true, + "google.generativeai.protos.ContentEmbedding.__ror__": true, + "google.generativeai.protos.ContentEmbedding.copy_from": true, + "google.generativeai.protos.ContentEmbedding.deserialize": true, + "google.generativeai.protos.ContentEmbedding.from_json": true, + "google.generativeai.protos.ContentEmbedding.mro": true, + "google.generativeai.protos.ContentEmbedding.pb": true, + "google.generativeai.protos.ContentEmbedding.serialize": true, + "google.generativeai.protos.ContentEmbedding.to_dict": true, + "google.generativeai.protos.ContentEmbedding.to_json": true, + "google.generativeai.protos.ContentEmbedding.values": true, + "google.generativeai.protos.ContentEmbedding.wrap": true, + "google.generativeai.protos.ContentFilter": false, + "google.generativeai.protos.ContentFilter.BlockedReason": false, + "google.generativeai.protos.ContentFilter.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true, + "google.generativeai.protos.ContentFilter.BlockedReason.OTHER": true, + "google.generativeai.protos.ContentFilter.BlockedReason.SAFETY": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__add__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__and__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__contains__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": true, + 
"google.generativeai.protos.ContentFilter.BlockedReason.__init__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__iter__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__le__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__len__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__new__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__or__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": true, + 
"google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": true, + "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": true, + "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": true, + "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": true, + "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": true, + "google.generativeai.protos.ContentFilter.BlockedReason.denominator": true, + "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes": true, + "google.generativeai.protos.ContentFilter.BlockedReason.imag": true, + "google.generativeai.protos.ContentFilter.BlockedReason.numerator": true, + "google.generativeai.protos.ContentFilter.BlockedReason.real": true, + "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": true, + "google.generativeai.protos.ContentFilter.__call__": true, + "google.generativeai.protos.ContentFilter.__eq__": true, + "google.generativeai.protos.ContentFilter.__ge__": true, + "google.generativeai.protos.ContentFilter.__gt__": true, + "google.generativeai.protos.ContentFilter.__init__": true, + "google.generativeai.protos.ContentFilter.__le__": true, + "google.generativeai.protos.ContentFilter.__lt__": true, + "google.generativeai.protos.ContentFilter.__ne__": true, + "google.generativeai.protos.ContentFilter.__new__": true, + "google.generativeai.protos.ContentFilter.__or__": true, + "google.generativeai.protos.ContentFilter.__ror__": true, + "google.generativeai.protos.ContentFilter.copy_from": true, + "google.generativeai.protos.ContentFilter.deserialize": true, + "google.generativeai.protos.ContentFilter.from_json": true, + 
"google.generativeai.protos.ContentFilter.message": true, + "google.generativeai.protos.ContentFilter.mro": true, + "google.generativeai.protos.ContentFilter.pb": true, + "google.generativeai.protos.ContentFilter.reason": true, + "google.generativeai.protos.ContentFilter.serialize": true, + "google.generativeai.protos.ContentFilter.to_dict": true, + "google.generativeai.protos.ContentFilter.to_json": true, + "google.generativeai.protos.ContentFilter.wrap": true, + "google.generativeai.protos.Corpus": false, + "google.generativeai.protos.Corpus.__call__": true, + "google.generativeai.protos.Corpus.__eq__": true, + "google.generativeai.protos.Corpus.__ge__": true, + "google.generativeai.protos.Corpus.__gt__": true, + "google.generativeai.protos.Corpus.__init__": true, + "google.generativeai.protos.Corpus.__le__": true, + "google.generativeai.protos.Corpus.__lt__": true, + "google.generativeai.protos.Corpus.__ne__": true, + "google.generativeai.protos.Corpus.__new__": true, + "google.generativeai.protos.Corpus.__or__": true, + "google.generativeai.protos.Corpus.__ror__": true, + "google.generativeai.protos.Corpus.copy_from": true, + "google.generativeai.protos.Corpus.create_time": true, + "google.generativeai.protos.Corpus.deserialize": true, + "google.generativeai.protos.Corpus.display_name": true, + "google.generativeai.protos.Corpus.from_json": true, + "google.generativeai.protos.Corpus.mro": true, + "google.generativeai.protos.Corpus.name": true, + "google.generativeai.protos.Corpus.pb": true, + "google.generativeai.protos.Corpus.serialize": true, + "google.generativeai.protos.Corpus.to_dict": true, + "google.generativeai.protos.Corpus.to_json": true, + "google.generativeai.protos.Corpus.update_time": true, + "google.generativeai.protos.Corpus.wrap": true, + "google.generativeai.protos.CountMessageTokensRequest": false, + "google.generativeai.protos.CountMessageTokensRequest.__call__": true, + "google.generativeai.protos.CountMessageTokensRequest.__eq__": true, + 
"google.generativeai.protos.CountMessageTokensRequest.__ge__": true, + "google.generativeai.protos.CountMessageTokensRequest.__gt__": true, + "google.generativeai.protos.CountMessageTokensRequest.__init__": true, + "google.generativeai.protos.CountMessageTokensRequest.__le__": true, + "google.generativeai.protos.CountMessageTokensRequest.__lt__": true, + "google.generativeai.protos.CountMessageTokensRequest.__ne__": true, + "google.generativeai.protos.CountMessageTokensRequest.__new__": true, + "google.generativeai.protos.CountMessageTokensRequest.__or__": true, + "google.generativeai.protos.CountMessageTokensRequest.__ror__": true, + "google.generativeai.protos.CountMessageTokensRequest.copy_from": true, + "google.generativeai.protos.CountMessageTokensRequest.deserialize": true, + "google.generativeai.protos.CountMessageTokensRequest.from_json": true, + "google.generativeai.protos.CountMessageTokensRequest.model": true, + "google.generativeai.protos.CountMessageTokensRequest.mro": true, + "google.generativeai.protos.CountMessageTokensRequest.pb": true, + "google.generativeai.protos.CountMessageTokensRequest.prompt": true, + "google.generativeai.protos.CountMessageTokensRequest.serialize": true, + "google.generativeai.protos.CountMessageTokensRequest.to_dict": true, + "google.generativeai.protos.CountMessageTokensRequest.to_json": true, + "google.generativeai.protos.CountMessageTokensRequest.wrap": true, + "google.generativeai.protos.CountMessageTokensResponse": false, + "google.generativeai.protos.CountMessageTokensResponse.__call__": true, + "google.generativeai.protos.CountMessageTokensResponse.__eq__": true, + "google.generativeai.protos.CountMessageTokensResponse.__ge__": true, + "google.generativeai.protos.CountMessageTokensResponse.__gt__": true, + "google.generativeai.protos.CountMessageTokensResponse.__init__": true, + "google.generativeai.protos.CountMessageTokensResponse.__le__": true, + "google.generativeai.protos.CountMessageTokensResponse.__lt__": 
true, + "google.generativeai.protos.CountMessageTokensResponse.__ne__": true, + "google.generativeai.protos.CountMessageTokensResponse.__new__": true, + "google.generativeai.protos.CountMessageTokensResponse.__or__": true, + "google.generativeai.protos.CountMessageTokensResponse.__ror__": true, + "google.generativeai.protos.CountMessageTokensResponse.copy_from": true, + "google.generativeai.protos.CountMessageTokensResponse.deserialize": true, + "google.generativeai.protos.CountMessageTokensResponse.from_json": true, + "google.generativeai.protos.CountMessageTokensResponse.mro": true, + "google.generativeai.protos.CountMessageTokensResponse.pb": true, + "google.generativeai.protos.CountMessageTokensResponse.serialize": true, + "google.generativeai.protos.CountMessageTokensResponse.to_dict": true, + "google.generativeai.protos.CountMessageTokensResponse.to_json": true, + "google.generativeai.protos.CountMessageTokensResponse.token_count": true, + "google.generativeai.protos.CountMessageTokensResponse.wrap": true, + "google.generativeai.protos.CountTextTokensRequest": false, + "google.generativeai.protos.CountTextTokensRequest.__call__": true, + "google.generativeai.protos.CountTextTokensRequest.__eq__": true, + "google.generativeai.protos.CountTextTokensRequest.__ge__": true, + "google.generativeai.protos.CountTextTokensRequest.__gt__": true, + "google.generativeai.protos.CountTextTokensRequest.__init__": true, + "google.generativeai.protos.CountTextTokensRequest.__le__": true, + "google.generativeai.protos.CountTextTokensRequest.__lt__": true, + "google.generativeai.protos.CountTextTokensRequest.__ne__": true, + "google.generativeai.protos.CountTextTokensRequest.__new__": true, + "google.generativeai.protos.CountTextTokensRequest.__or__": true, + "google.generativeai.protos.CountTextTokensRequest.__ror__": true, + "google.generativeai.protos.CountTextTokensRequest.copy_from": true, + "google.generativeai.protos.CountTextTokensRequest.deserialize": true, + 
"google.generativeai.protos.CountTextTokensRequest.from_json": true, + "google.generativeai.protos.CountTextTokensRequest.model": true, + "google.generativeai.protos.CountTextTokensRequest.mro": true, + "google.generativeai.protos.CountTextTokensRequest.pb": true, + "google.generativeai.protos.CountTextTokensRequest.prompt": true, + "google.generativeai.protos.CountTextTokensRequest.serialize": true, + "google.generativeai.protos.CountTextTokensRequest.to_dict": true, + "google.generativeai.protos.CountTextTokensRequest.to_json": true, + "google.generativeai.protos.CountTextTokensRequest.wrap": true, + "google.generativeai.protos.CountTextTokensResponse": false, + "google.generativeai.protos.CountTextTokensResponse.__call__": true, + "google.generativeai.protos.CountTextTokensResponse.__eq__": true, + "google.generativeai.protos.CountTextTokensResponse.__ge__": true, + "google.generativeai.protos.CountTextTokensResponse.__gt__": true, + "google.generativeai.protos.CountTextTokensResponse.__init__": true, + "google.generativeai.protos.CountTextTokensResponse.__le__": true, + "google.generativeai.protos.CountTextTokensResponse.__lt__": true, + "google.generativeai.protos.CountTextTokensResponse.__ne__": true, + "google.generativeai.protos.CountTextTokensResponse.__new__": true, + "google.generativeai.protos.CountTextTokensResponse.__or__": true, + "google.generativeai.protos.CountTextTokensResponse.__ror__": true, + "google.generativeai.protos.CountTextTokensResponse.copy_from": true, + "google.generativeai.protos.CountTextTokensResponse.deserialize": true, + "google.generativeai.protos.CountTextTokensResponse.from_json": true, + "google.generativeai.protos.CountTextTokensResponse.mro": true, + "google.generativeai.protos.CountTextTokensResponse.pb": true, + "google.generativeai.protos.CountTextTokensResponse.serialize": true, + "google.generativeai.protos.CountTextTokensResponse.to_dict": true, + "google.generativeai.protos.CountTextTokensResponse.to_json": true, + 
"google.generativeai.protos.CountTextTokensResponse.token_count": true, + "google.generativeai.protos.CountTextTokensResponse.wrap": true, + "google.generativeai.protos.CountTokensRequest": false, + "google.generativeai.protos.CountTokensRequest.__call__": true, + "google.generativeai.protos.CountTokensRequest.__eq__": true, + "google.generativeai.protos.CountTokensRequest.__ge__": true, + "google.generativeai.protos.CountTokensRequest.__gt__": true, + "google.generativeai.protos.CountTokensRequest.__init__": true, + "google.generativeai.protos.CountTokensRequest.__le__": true, + "google.generativeai.protos.CountTokensRequest.__lt__": true, + "google.generativeai.protos.CountTokensRequest.__ne__": true, + "google.generativeai.protos.CountTokensRequest.__new__": true, + "google.generativeai.protos.CountTokensRequest.__or__": true, + "google.generativeai.protos.CountTokensRequest.__ror__": true, + "google.generativeai.protos.CountTokensRequest.contents": true, + "google.generativeai.protos.CountTokensRequest.copy_from": true, + "google.generativeai.protos.CountTokensRequest.deserialize": true, + "google.generativeai.protos.CountTokensRequest.from_json": true, + "google.generativeai.protos.CountTokensRequest.generate_content_request": true, + "google.generativeai.protos.CountTokensRequest.model": true, + "google.generativeai.protos.CountTokensRequest.mro": true, + "google.generativeai.protos.CountTokensRequest.pb": true, + "google.generativeai.protos.CountTokensRequest.serialize": true, + "google.generativeai.protos.CountTokensRequest.to_dict": true, + "google.generativeai.protos.CountTokensRequest.to_json": true, + "google.generativeai.protos.CountTokensRequest.wrap": true, + "google.generativeai.protos.CountTokensResponse": false, + "google.generativeai.protos.CountTokensResponse.__call__": true, + "google.generativeai.protos.CountTokensResponse.__eq__": true, + "google.generativeai.protos.CountTokensResponse.__ge__": true, + 
"google.generativeai.protos.CountTokensResponse.__gt__": true, + "google.generativeai.protos.CountTokensResponse.__init__": true, + "google.generativeai.protos.CountTokensResponse.__le__": true, + "google.generativeai.protos.CountTokensResponse.__lt__": true, + "google.generativeai.protos.CountTokensResponse.__ne__": true, + "google.generativeai.protos.CountTokensResponse.__new__": true, + "google.generativeai.protos.CountTokensResponse.__or__": true, + "google.generativeai.protos.CountTokensResponse.__ror__": true, + "google.generativeai.protos.CountTokensResponse.cached_content_token_count": true, + "google.generativeai.protos.CountTokensResponse.copy_from": true, + "google.generativeai.protos.CountTokensResponse.deserialize": true, + "google.generativeai.protos.CountTokensResponse.from_json": true, + "google.generativeai.protos.CountTokensResponse.mro": true, + "google.generativeai.protos.CountTokensResponse.pb": true, + "google.generativeai.protos.CountTokensResponse.serialize": true, + "google.generativeai.protos.CountTokensResponse.to_dict": true, + "google.generativeai.protos.CountTokensResponse.to_json": true, + "google.generativeai.protos.CountTokensResponse.total_tokens": true, + "google.generativeai.protos.CountTokensResponse.wrap": true, + "google.generativeai.protos.CreateCachedContentRequest": false, + "google.generativeai.protos.CreateCachedContentRequest.__call__": true, + "google.generativeai.protos.CreateCachedContentRequest.__eq__": true, + "google.generativeai.protos.CreateCachedContentRequest.__ge__": true, + "google.generativeai.protos.CreateCachedContentRequest.__gt__": true, + "google.generativeai.protos.CreateCachedContentRequest.__init__": true, + "google.generativeai.protos.CreateCachedContentRequest.__le__": true, + "google.generativeai.protos.CreateCachedContentRequest.__lt__": true, + "google.generativeai.protos.CreateCachedContentRequest.__ne__": true, + "google.generativeai.protos.CreateCachedContentRequest.__new__": true, + 
"google.generativeai.protos.CreateCachedContentRequest.__or__": true, + "google.generativeai.protos.CreateCachedContentRequest.__ror__": true, + "google.generativeai.protos.CreateCachedContentRequest.cached_content": true, + "google.generativeai.protos.CreateCachedContentRequest.copy_from": true, + "google.generativeai.protos.CreateCachedContentRequest.deserialize": true, + "google.generativeai.protos.CreateCachedContentRequest.from_json": true, + "google.generativeai.protos.CreateCachedContentRequest.mro": true, + "google.generativeai.protos.CreateCachedContentRequest.pb": true, + "google.generativeai.protos.CreateCachedContentRequest.serialize": true, + "google.generativeai.protos.CreateCachedContentRequest.to_dict": true, + "google.generativeai.protos.CreateCachedContentRequest.to_json": true, + "google.generativeai.protos.CreateCachedContentRequest.wrap": true, + "google.generativeai.protos.CreateChunkRequest": false, + "google.generativeai.protos.CreateChunkRequest.__call__": true, + "google.generativeai.protos.CreateChunkRequest.__eq__": true, + "google.generativeai.protos.CreateChunkRequest.__ge__": true, + "google.generativeai.protos.CreateChunkRequest.__gt__": true, + "google.generativeai.protos.CreateChunkRequest.__init__": true, + "google.generativeai.protos.CreateChunkRequest.__le__": true, + "google.generativeai.protos.CreateChunkRequest.__lt__": true, + "google.generativeai.protos.CreateChunkRequest.__ne__": true, + "google.generativeai.protos.CreateChunkRequest.__new__": true, + "google.generativeai.protos.CreateChunkRequest.__or__": true, + "google.generativeai.protos.CreateChunkRequest.__ror__": true, + "google.generativeai.protos.CreateChunkRequest.chunk": true, + "google.generativeai.protos.CreateChunkRequest.copy_from": true, + "google.generativeai.protos.CreateChunkRequest.deserialize": true, + "google.generativeai.protos.CreateChunkRequest.from_json": true, + "google.generativeai.protos.CreateChunkRequest.mro": true, + 
"google.generativeai.protos.CreateChunkRequest.parent": true, + "google.generativeai.protos.CreateChunkRequest.pb": true, + "google.generativeai.protos.CreateChunkRequest.serialize": true, + "google.generativeai.protos.CreateChunkRequest.to_dict": true, + "google.generativeai.protos.CreateChunkRequest.to_json": true, + "google.generativeai.protos.CreateChunkRequest.wrap": true, + "google.generativeai.protos.CreateCorpusRequest": false, + "google.generativeai.protos.CreateCorpusRequest.__call__": true, + "google.generativeai.protos.CreateCorpusRequest.__eq__": true, + "google.generativeai.protos.CreateCorpusRequest.__ge__": true, + "google.generativeai.protos.CreateCorpusRequest.__gt__": true, + "google.generativeai.protos.CreateCorpusRequest.__init__": true, + "google.generativeai.protos.CreateCorpusRequest.__le__": true, + "google.generativeai.protos.CreateCorpusRequest.__lt__": true, + "google.generativeai.protos.CreateCorpusRequest.__ne__": true, + "google.generativeai.protos.CreateCorpusRequest.__new__": true, + "google.generativeai.protos.CreateCorpusRequest.__or__": true, + "google.generativeai.protos.CreateCorpusRequest.__ror__": true, + "google.generativeai.protos.CreateCorpusRequest.copy_from": true, + "google.generativeai.protos.CreateCorpusRequest.corpus": true, + "google.generativeai.protos.CreateCorpusRequest.deserialize": true, + "google.generativeai.protos.CreateCorpusRequest.from_json": true, + "google.generativeai.protos.CreateCorpusRequest.mro": true, + "google.generativeai.protos.CreateCorpusRequest.pb": true, + "google.generativeai.protos.CreateCorpusRequest.serialize": true, + "google.generativeai.protos.CreateCorpusRequest.to_dict": true, + "google.generativeai.protos.CreateCorpusRequest.to_json": true, + "google.generativeai.protos.CreateCorpusRequest.wrap": true, + "google.generativeai.protos.CreateDocumentRequest": false, + "google.generativeai.protos.CreateDocumentRequest.__call__": true, + 
"google.generativeai.protos.CreateDocumentRequest.__eq__": true, + "google.generativeai.protos.CreateDocumentRequest.__ge__": true, + "google.generativeai.protos.CreateDocumentRequest.__gt__": true, + "google.generativeai.protos.CreateDocumentRequest.__init__": true, + "google.generativeai.protos.CreateDocumentRequest.__le__": true, + "google.generativeai.protos.CreateDocumentRequest.__lt__": true, + "google.generativeai.protos.CreateDocumentRequest.__ne__": true, + "google.generativeai.protos.CreateDocumentRequest.__new__": true, + "google.generativeai.protos.CreateDocumentRequest.__or__": true, + "google.generativeai.protos.CreateDocumentRequest.__ror__": true, + "google.generativeai.protos.CreateDocumentRequest.copy_from": true, + "google.generativeai.protos.CreateDocumentRequest.deserialize": true, + "google.generativeai.protos.CreateDocumentRequest.document": true, + "google.generativeai.protos.CreateDocumentRequest.from_json": true, + "google.generativeai.protos.CreateDocumentRequest.mro": true, + "google.generativeai.protos.CreateDocumentRequest.parent": true, + "google.generativeai.protos.CreateDocumentRequest.pb": true, + "google.generativeai.protos.CreateDocumentRequest.serialize": true, + "google.generativeai.protos.CreateDocumentRequest.to_dict": true, + "google.generativeai.protos.CreateDocumentRequest.to_json": true, + "google.generativeai.protos.CreateDocumentRequest.wrap": true, + "google.generativeai.protos.CreateFileRequest": false, + "google.generativeai.protos.CreateFileRequest.__call__": true, + "google.generativeai.protos.CreateFileRequest.__eq__": true, + "google.generativeai.protos.CreateFileRequest.__ge__": true, + "google.generativeai.protos.CreateFileRequest.__gt__": true, + "google.generativeai.protos.CreateFileRequest.__init__": true, + "google.generativeai.protos.CreateFileRequest.__le__": true, + "google.generativeai.protos.CreateFileRequest.__lt__": true, + "google.generativeai.protos.CreateFileRequest.__ne__": true, + 
"google.generativeai.protos.CreateFileRequest.__new__": true, + "google.generativeai.protos.CreateFileRequest.__or__": true, + "google.generativeai.protos.CreateFileRequest.__ror__": true, + "google.generativeai.protos.CreateFileRequest.copy_from": true, + "google.generativeai.protos.CreateFileRequest.deserialize": true, + "google.generativeai.protos.CreateFileRequest.file": true, + "google.generativeai.protos.CreateFileRequest.from_json": true, + "google.generativeai.protos.CreateFileRequest.mro": true, + "google.generativeai.protos.CreateFileRequest.pb": true, + "google.generativeai.protos.CreateFileRequest.serialize": true, + "google.generativeai.protos.CreateFileRequest.to_dict": true, + "google.generativeai.protos.CreateFileRequest.to_json": true, + "google.generativeai.protos.CreateFileRequest.wrap": true, + "google.generativeai.protos.CreateFileResponse": false, + "google.generativeai.protos.CreateFileResponse.__call__": true, + "google.generativeai.protos.CreateFileResponse.__eq__": true, + "google.generativeai.protos.CreateFileResponse.__ge__": true, + "google.generativeai.protos.CreateFileResponse.__gt__": true, + "google.generativeai.protos.CreateFileResponse.__init__": true, + "google.generativeai.protos.CreateFileResponse.__le__": true, + "google.generativeai.protos.CreateFileResponse.__lt__": true, + "google.generativeai.protos.CreateFileResponse.__ne__": true, + "google.generativeai.protos.CreateFileResponse.__new__": true, + "google.generativeai.protos.CreateFileResponse.__or__": true, + "google.generativeai.protos.CreateFileResponse.__ror__": true, + "google.generativeai.protos.CreateFileResponse.copy_from": true, + "google.generativeai.protos.CreateFileResponse.deserialize": true, + "google.generativeai.protos.CreateFileResponse.file": true, + "google.generativeai.protos.CreateFileResponse.from_json": true, + "google.generativeai.protos.CreateFileResponse.mro": true, + "google.generativeai.protos.CreateFileResponse.pb": true, + 
"google.generativeai.protos.CreateFileResponse.serialize": true, + "google.generativeai.protos.CreateFileResponse.to_dict": true, + "google.generativeai.protos.CreateFileResponse.to_json": true, + "google.generativeai.protos.CreateFileResponse.wrap": true, + "google.generativeai.protos.CreatePermissionRequest": false, + "google.generativeai.protos.CreatePermissionRequest.__call__": true, + "google.generativeai.protos.CreatePermissionRequest.__eq__": true, + "google.generativeai.protos.CreatePermissionRequest.__ge__": true, + "google.generativeai.protos.CreatePermissionRequest.__gt__": true, + "google.generativeai.protos.CreatePermissionRequest.__init__": true, + "google.generativeai.protos.CreatePermissionRequest.__le__": true, + "google.generativeai.protos.CreatePermissionRequest.__lt__": true, + "google.generativeai.protos.CreatePermissionRequest.__ne__": true, + "google.generativeai.protos.CreatePermissionRequest.__new__": true, + "google.generativeai.protos.CreatePermissionRequest.__or__": true, + "google.generativeai.protos.CreatePermissionRequest.__ror__": true, + "google.generativeai.protos.CreatePermissionRequest.copy_from": true, + "google.generativeai.protos.CreatePermissionRequest.deserialize": true, + "google.generativeai.protos.CreatePermissionRequest.from_json": true, + "google.generativeai.protos.CreatePermissionRequest.mro": true, + "google.generativeai.protos.CreatePermissionRequest.parent": true, + "google.generativeai.protos.CreatePermissionRequest.pb": true, + "google.generativeai.protos.CreatePermissionRequest.permission": true, + "google.generativeai.protos.CreatePermissionRequest.serialize": true, + "google.generativeai.protos.CreatePermissionRequest.to_dict": true, + "google.generativeai.protos.CreatePermissionRequest.to_json": true, + "google.generativeai.protos.CreatePermissionRequest.wrap": true, + "google.generativeai.protos.CreateTunedModelMetadata": false, + "google.generativeai.protos.CreateTunedModelMetadata.__call__": true, + 
"google.generativeai.protos.CreateTunedModelMetadata.__eq__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__ge__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__gt__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__init__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__le__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__lt__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__ne__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__new__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__or__": true, + "google.generativeai.protos.CreateTunedModelMetadata.__ror__": true, + "google.generativeai.protos.CreateTunedModelMetadata.completed_percent": true, + "google.generativeai.protos.CreateTunedModelMetadata.completed_steps": true, + "google.generativeai.protos.CreateTunedModelMetadata.copy_from": true, + "google.generativeai.protos.CreateTunedModelMetadata.deserialize": true, + "google.generativeai.protos.CreateTunedModelMetadata.from_json": true, + "google.generativeai.protos.CreateTunedModelMetadata.mro": true, + "google.generativeai.protos.CreateTunedModelMetadata.pb": true, + "google.generativeai.protos.CreateTunedModelMetadata.serialize": true, + "google.generativeai.protos.CreateTunedModelMetadata.snapshots": true, + "google.generativeai.protos.CreateTunedModelMetadata.to_dict": true, + "google.generativeai.protos.CreateTunedModelMetadata.to_json": true, + "google.generativeai.protos.CreateTunedModelMetadata.total_steps": true, + "google.generativeai.protos.CreateTunedModelMetadata.tuned_model": true, + "google.generativeai.protos.CreateTunedModelMetadata.wrap": true, + "google.generativeai.protos.CreateTunedModelRequest": false, + "google.generativeai.protos.CreateTunedModelRequest.__call__": true, + "google.generativeai.protos.CreateTunedModelRequest.__eq__": true, + "google.generativeai.protos.CreateTunedModelRequest.__ge__": true, + 
"google.generativeai.protos.CreateTunedModelRequest.__gt__": true, + "google.generativeai.protos.CreateTunedModelRequest.__init__": true, + "google.generativeai.protos.CreateTunedModelRequest.__le__": true, + "google.generativeai.protos.CreateTunedModelRequest.__lt__": true, + "google.generativeai.protos.CreateTunedModelRequest.__ne__": true, + "google.generativeai.protos.CreateTunedModelRequest.__new__": true, + "google.generativeai.protos.CreateTunedModelRequest.__or__": true, + "google.generativeai.protos.CreateTunedModelRequest.__ror__": true, + "google.generativeai.protos.CreateTunedModelRequest.copy_from": true, + "google.generativeai.protos.CreateTunedModelRequest.deserialize": true, + "google.generativeai.protos.CreateTunedModelRequest.from_json": true, + "google.generativeai.protos.CreateTunedModelRequest.mro": true, + "google.generativeai.protos.CreateTunedModelRequest.pb": true, + "google.generativeai.protos.CreateTunedModelRequest.serialize": true, + "google.generativeai.protos.CreateTunedModelRequest.to_dict": true, + "google.generativeai.protos.CreateTunedModelRequest.to_json": true, + "google.generativeai.protos.CreateTunedModelRequest.tuned_model": true, + "google.generativeai.protos.CreateTunedModelRequest.tuned_model_id": true, + "google.generativeai.protos.CreateTunedModelRequest.wrap": true, + "google.generativeai.protos.CustomMetadata": false, + "google.generativeai.protos.CustomMetadata.__call__": true, + "google.generativeai.protos.CustomMetadata.__eq__": true, + "google.generativeai.protos.CustomMetadata.__ge__": true, + "google.generativeai.protos.CustomMetadata.__gt__": true, + "google.generativeai.protos.CustomMetadata.__init__": true, + "google.generativeai.protos.CustomMetadata.__le__": true, + "google.generativeai.protos.CustomMetadata.__lt__": true, + "google.generativeai.protos.CustomMetadata.__ne__": true, + "google.generativeai.protos.CustomMetadata.__new__": true, + "google.generativeai.protos.CustomMetadata.__or__": true, + 
"google.generativeai.protos.CustomMetadata.__ror__": true, + "google.generativeai.protos.CustomMetadata.copy_from": true, + "google.generativeai.protos.CustomMetadata.deserialize": true, + "google.generativeai.protos.CustomMetadata.from_json": true, + "google.generativeai.protos.CustomMetadata.key": true, + "google.generativeai.protos.CustomMetadata.mro": true, + "google.generativeai.protos.CustomMetadata.numeric_value": true, + "google.generativeai.protos.CustomMetadata.pb": true, + "google.generativeai.protos.CustomMetadata.serialize": true, + "google.generativeai.protos.CustomMetadata.string_list_value": true, + "google.generativeai.protos.CustomMetadata.string_value": true, + "google.generativeai.protos.CustomMetadata.to_dict": true, + "google.generativeai.protos.CustomMetadata.to_json": true, + "google.generativeai.protos.CustomMetadata.wrap": true, + "google.generativeai.protos.Dataset": false, + "google.generativeai.protos.Dataset.__call__": true, + "google.generativeai.protos.Dataset.__eq__": true, + "google.generativeai.protos.Dataset.__ge__": true, + "google.generativeai.protos.Dataset.__gt__": true, + "google.generativeai.protos.Dataset.__init__": true, + "google.generativeai.protos.Dataset.__le__": true, + "google.generativeai.protos.Dataset.__lt__": true, + "google.generativeai.protos.Dataset.__ne__": true, + "google.generativeai.protos.Dataset.__new__": true, + "google.generativeai.protos.Dataset.__or__": true, + "google.generativeai.protos.Dataset.__ror__": true, + "google.generativeai.protos.Dataset.copy_from": true, + "google.generativeai.protos.Dataset.deserialize": true, + "google.generativeai.protos.Dataset.examples": true, + "google.generativeai.protos.Dataset.from_json": true, + "google.generativeai.protos.Dataset.mro": true, + "google.generativeai.protos.Dataset.pb": true, + "google.generativeai.protos.Dataset.serialize": true, + "google.generativeai.protos.Dataset.to_dict": true, + "google.generativeai.protos.Dataset.to_json": true, + 
"google.generativeai.protos.Dataset.wrap": true, + "google.generativeai.protos.DeleteCachedContentRequest": false, + "google.generativeai.protos.DeleteCachedContentRequest.__call__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__eq__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__ge__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__gt__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__init__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__le__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__lt__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__ne__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__new__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__or__": true, + "google.generativeai.protos.DeleteCachedContentRequest.__ror__": true, + "google.generativeai.protos.DeleteCachedContentRequest.copy_from": true, + "google.generativeai.protos.DeleteCachedContentRequest.deserialize": true, + "google.generativeai.protos.DeleteCachedContentRequest.from_json": true, + "google.generativeai.protos.DeleteCachedContentRequest.mro": true, + "google.generativeai.protos.DeleteCachedContentRequest.name": true, + "google.generativeai.protos.DeleteCachedContentRequest.pb": true, + "google.generativeai.protos.DeleteCachedContentRequest.serialize": true, + "google.generativeai.protos.DeleteCachedContentRequest.to_dict": true, + "google.generativeai.protos.DeleteCachedContentRequest.to_json": true, + "google.generativeai.protos.DeleteCachedContentRequest.wrap": true, + "google.generativeai.protos.DeleteChunkRequest": false, + "google.generativeai.protos.DeleteChunkRequest.__call__": true, + "google.generativeai.protos.DeleteChunkRequest.__eq__": true, + "google.generativeai.protos.DeleteChunkRequest.__ge__": true, + "google.generativeai.protos.DeleteChunkRequest.__gt__": true, + 
"google.generativeai.protos.DeleteChunkRequest.__init__": true, + "google.generativeai.protos.DeleteChunkRequest.__le__": true, + "google.generativeai.protos.DeleteChunkRequest.__lt__": true, + "google.generativeai.protos.DeleteChunkRequest.__ne__": true, + "google.generativeai.protos.DeleteChunkRequest.__new__": true, + "google.generativeai.protos.DeleteChunkRequest.__or__": true, + "google.generativeai.protos.DeleteChunkRequest.__ror__": true, + "google.generativeai.protos.DeleteChunkRequest.copy_from": true, + "google.generativeai.protos.DeleteChunkRequest.deserialize": true, + "google.generativeai.protos.DeleteChunkRequest.from_json": true, + "google.generativeai.protos.DeleteChunkRequest.mro": true, + "google.generativeai.protos.DeleteChunkRequest.name": true, + "google.generativeai.protos.DeleteChunkRequest.pb": true, + "google.generativeai.protos.DeleteChunkRequest.serialize": true, + "google.generativeai.protos.DeleteChunkRequest.to_dict": true, + "google.generativeai.protos.DeleteChunkRequest.to_json": true, + "google.generativeai.protos.DeleteChunkRequest.wrap": true, + "google.generativeai.protos.DeleteCorpusRequest": false, + "google.generativeai.protos.DeleteCorpusRequest.__call__": true, + "google.generativeai.protos.DeleteCorpusRequest.__eq__": true, + "google.generativeai.protos.DeleteCorpusRequest.__ge__": true, + "google.generativeai.protos.DeleteCorpusRequest.__gt__": true, + "google.generativeai.protos.DeleteCorpusRequest.__init__": true, + "google.generativeai.protos.DeleteCorpusRequest.__le__": true, + "google.generativeai.protos.DeleteCorpusRequest.__lt__": true, + "google.generativeai.protos.DeleteCorpusRequest.__ne__": true, + "google.generativeai.protos.DeleteCorpusRequest.__new__": true, + "google.generativeai.protos.DeleteCorpusRequest.__or__": true, + "google.generativeai.protos.DeleteCorpusRequest.__ror__": true, + "google.generativeai.protos.DeleteCorpusRequest.copy_from": true, + 
"google.generativeai.protos.DeleteCorpusRequest.deserialize": true, + "google.generativeai.protos.DeleteCorpusRequest.force": true, + "google.generativeai.protos.DeleteCorpusRequest.from_json": true, + "google.generativeai.protos.DeleteCorpusRequest.mro": true, + "google.generativeai.protos.DeleteCorpusRequest.name": true, + "google.generativeai.protos.DeleteCorpusRequest.pb": true, + "google.generativeai.protos.DeleteCorpusRequest.serialize": true, + "google.generativeai.protos.DeleteCorpusRequest.to_dict": true, + "google.generativeai.protos.DeleteCorpusRequest.to_json": true, + "google.generativeai.protos.DeleteCorpusRequest.wrap": true, + "google.generativeai.protos.DeleteDocumentRequest": false, + "google.generativeai.protos.DeleteDocumentRequest.__call__": true, + "google.generativeai.protos.DeleteDocumentRequest.__eq__": true, + "google.generativeai.protos.DeleteDocumentRequest.__ge__": true, + "google.generativeai.protos.DeleteDocumentRequest.__gt__": true, + "google.generativeai.protos.DeleteDocumentRequest.__init__": true, + "google.generativeai.protos.DeleteDocumentRequest.__le__": true, + "google.generativeai.protos.DeleteDocumentRequest.__lt__": true, + "google.generativeai.protos.DeleteDocumentRequest.__ne__": true, + "google.generativeai.protos.DeleteDocumentRequest.__new__": true, + "google.generativeai.protos.DeleteDocumentRequest.__or__": true, + "google.generativeai.protos.DeleteDocumentRequest.__ror__": true, + "google.generativeai.protos.DeleteDocumentRequest.copy_from": true, + "google.generativeai.protos.DeleteDocumentRequest.deserialize": true, + "google.generativeai.protos.DeleteDocumentRequest.force": true, + "google.generativeai.protos.DeleteDocumentRequest.from_json": true, + "google.generativeai.protos.DeleteDocumentRequest.mro": true, + "google.generativeai.protos.DeleteDocumentRequest.name": true, + "google.generativeai.protos.DeleteDocumentRequest.pb": true, + "google.generativeai.protos.DeleteDocumentRequest.serialize": true, + 
"google.generativeai.protos.DeleteDocumentRequest.to_dict": true, + "google.generativeai.protos.DeleteDocumentRequest.to_json": true, + "google.generativeai.protos.DeleteDocumentRequest.wrap": true, + "google.generativeai.protos.DeleteFileRequest": false, + "google.generativeai.protos.DeleteFileRequest.__call__": true, + "google.generativeai.protos.DeleteFileRequest.__eq__": true, + "google.generativeai.protos.DeleteFileRequest.__ge__": true, + "google.generativeai.protos.DeleteFileRequest.__gt__": true, + "google.generativeai.protos.DeleteFileRequest.__init__": true, + "google.generativeai.protos.DeleteFileRequest.__le__": true, + "google.generativeai.protos.DeleteFileRequest.__lt__": true, + "google.generativeai.protos.DeleteFileRequest.__ne__": true, + "google.generativeai.protos.DeleteFileRequest.__new__": true, + "google.generativeai.protos.DeleteFileRequest.__or__": true, + "google.generativeai.protos.DeleteFileRequest.__ror__": true, + "google.generativeai.protos.DeleteFileRequest.copy_from": true, + "google.generativeai.protos.DeleteFileRequest.deserialize": true, + "google.generativeai.protos.DeleteFileRequest.from_json": true, + "google.generativeai.protos.DeleteFileRequest.mro": true, + "google.generativeai.protos.DeleteFileRequest.name": true, + "google.generativeai.protos.DeleteFileRequest.pb": true, + "google.generativeai.protos.DeleteFileRequest.serialize": true, + "google.generativeai.protos.DeleteFileRequest.to_dict": true, + "google.generativeai.protos.DeleteFileRequest.to_json": true, + "google.generativeai.protos.DeleteFileRequest.wrap": true, + "google.generativeai.protos.DeletePermissionRequest": false, + "google.generativeai.protos.DeletePermissionRequest.__call__": true, + "google.generativeai.protos.DeletePermissionRequest.__eq__": true, + "google.generativeai.protos.DeletePermissionRequest.__ge__": true, + "google.generativeai.protos.DeletePermissionRequest.__gt__": true, + "google.generativeai.protos.DeletePermissionRequest.__init__": 
true, + "google.generativeai.protos.DeletePermissionRequest.__le__": true, + "google.generativeai.protos.DeletePermissionRequest.__lt__": true, + "google.generativeai.protos.DeletePermissionRequest.__ne__": true, + "google.generativeai.protos.DeletePermissionRequest.__new__": true, + "google.generativeai.protos.DeletePermissionRequest.__or__": true, + "google.generativeai.protos.DeletePermissionRequest.__ror__": true, + "google.generativeai.protos.DeletePermissionRequest.copy_from": true, + "google.generativeai.protos.DeletePermissionRequest.deserialize": true, + "google.generativeai.protos.DeletePermissionRequest.from_json": true, + "google.generativeai.protos.DeletePermissionRequest.mro": true, + "google.generativeai.protos.DeletePermissionRequest.name": true, + "google.generativeai.protos.DeletePermissionRequest.pb": true, + "google.generativeai.protos.DeletePermissionRequest.serialize": true, + "google.generativeai.protos.DeletePermissionRequest.to_dict": true, + "google.generativeai.protos.DeletePermissionRequest.to_json": true, + "google.generativeai.protos.DeletePermissionRequest.wrap": true, + "google.generativeai.protos.DeleteTunedModelRequest": false, + "google.generativeai.protos.DeleteTunedModelRequest.__call__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__eq__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__ge__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__gt__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__init__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__le__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__lt__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__ne__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__new__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__or__": true, + "google.generativeai.protos.DeleteTunedModelRequest.__ror__": true, + 
"google.generativeai.protos.DeleteTunedModelRequest.copy_from": true, + "google.generativeai.protos.DeleteTunedModelRequest.deserialize": true, + "google.generativeai.protos.DeleteTunedModelRequest.from_json": true, + "google.generativeai.protos.DeleteTunedModelRequest.mro": true, + "google.generativeai.protos.DeleteTunedModelRequest.name": true, + "google.generativeai.protos.DeleteTunedModelRequest.pb": true, + "google.generativeai.protos.DeleteTunedModelRequest.serialize": true, + "google.generativeai.protos.DeleteTunedModelRequest.to_dict": true, + "google.generativeai.protos.DeleteTunedModelRequest.to_json": true, + "google.generativeai.protos.DeleteTunedModelRequest.wrap": true, + "google.generativeai.protos.Document": false, + "google.generativeai.protos.Document.__call__": true, + "google.generativeai.protos.Document.__eq__": true, + "google.generativeai.protos.Document.__ge__": true, + "google.generativeai.protos.Document.__gt__": true, + "google.generativeai.protos.Document.__init__": true, + "google.generativeai.protos.Document.__le__": true, + "google.generativeai.protos.Document.__lt__": true, + "google.generativeai.protos.Document.__ne__": true, + "google.generativeai.protos.Document.__new__": true, + "google.generativeai.protos.Document.__or__": true, + "google.generativeai.protos.Document.__ror__": true, + "google.generativeai.protos.Document.copy_from": true, + "google.generativeai.protos.Document.create_time": true, + "google.generativeai.protos.Document.custom_metadata": true, + "google.generativeai.protos.Document.deserialize": true, + "google.generativeai.protos.Document.display_name": true, + "google.generativeai.protos.Document.from_json": true, + "google.generativeai.protos.Document.mro": true, + "google.generativeai.protos.Document.name": true, + "google.generativeai.protos.Document.pb": true, + "google.generativeai.protos.Document.serialize": true, + "google.generativeai.protos.Document.to_dict": true, + 
"google.generativeai.protos.Document.to_json": true, + "google.generativeai.protos.Document.update_time": true, + "google.generativeai.protos.Document.wrap": true, + "google.generativeai.protos.EmbedContentRequest": false, + "google.generativeai.protos.EmbedContentRequest.__call__": true, + "google.generativeai.protos.EmbedContentRequest.__eq__": true, + "google.generativeai.protos.EmbedContentRequest.__ge__": true, + "google.generativeai.protos.EmbedContentRequest.__gt__": true, + "google.generativeai.protos.EmbedContentRequest.__init__": true, + "google.generativeai.protos.EmbedContentRequest.__le__": true, + "google.generativeai.protos.EmbedContentRequest.__lt__": true, + "google.generativeai.protos.EmbedContentRequest.__ne__": true, + "google.generativeai.protos.EmbedContentRequest.__new__": true, + "google.generativeai.protos.EmbedContentRequest.__or__": true, + "google.generativeai.protos.EmbedContentRequest.__ror__": true, + "google.generativeai.protos.EmbedContentRequest.content": true, + "google.generativeai.protos.EmbedContentRequest.copy_from": true, + "google.generativeai.protos.EmbedContentRequest.deserialize": true, + "google.generativeai.protos.EmbedContentRequest.from_json": true, + "google.generativeai.protos.EmbedContentRequest.model": true, + "google.generativeai.protos.EmbedContentRequest.mro": true, + "google.generativeai.protos.EmbedContentRequest.output_dimensionality": true, + "google.generativeai.protos.EmbedContentRequest.pb": true, + "google.generativeai.protos.EmbedContentRequest.serialize": true, + "google.generativeai.protos.EmbedContentRequest.task_type": true, + "google.generativeai.protos.EmbedContentRequest.title": true, + "google.generativeai.protos.EmbedContentRequest.to_dict": true, + "google.generativeai.protos.EmbedContentRequest.to_json": true, + "google.generativeai.protos.EmbedContentRequest.wrap": true, + "google.generativeai.protos.EmbedContentResponse": false, + "google.generativeai.protos.EmbedContentResponse.__call__": 
true, + "google.generativeai.protos.EmbedContentResponse.__eq__": true, + "google.generativeai.protos.EmbedContentResponse.__ge__": true, + "google.generativeai.protos.EmbedContentResponse.__gt__": true, + "google.generativeai.protos.EmbedContentResponse.__init__": true, + "google.generativeai.protos.EmbedContentResponse.__le__": true, + "google.generativeai.protos.EmbedContentResponse.__lt__": true, + "google.generativeai.protos.EmbedContentResponse.__ne__": true, + "google.generativeai.protos.EmbedContentResponse.__new__": true, + "google.generativeai.protos.EmbedContentResponse.__or__": true, + "google.generativeai.protos.EmbedContentResponse.__ror__": true, + "google.generativeai.protos.EmbedContentResponse.copy_from": true, + "google.generativeai.protos.EmbedContentResponse.deserialize": true, + "google.generativeai.protos.EmbedContentResponse.embedding": true, + "google.generativeai.protos.EmbedContentResponse.from_json": true, + "google.generativeai.protos.EmbedContentResponse.mro": true, + "google.generativeai.protos.EmbedContentResponse.pb": true, + "google.generativeai.protos.EmbedContentResponse.serialize": true, + "google.generativeai.protos.EmbedContentResponse.to_dict": true, + "google.generativeai.protos.EmbedContentResponse.to_json": true, + "google.generativeai.protos.EmbedContentResponse.wrap": true, + "google.generativeai.protos.EmbedTextRequest": false, + "google.generativeai.protos.EmbedTextRequest.__call__": true, + "google.generativeai.protos.EmbedTextRequest.__eq__": true, + "google.generativeai.protos.EmbedTextRequest.__ge__": true, + "google.generativeai.protos.EmbedTextRequest.__gt__": true, + "google.generativeai.protos.EmbedTextRequest.__init__": true, + "google.generativeai.protos.EmbedTextRequest.__le__": true, + "google.generativeai.protos.EmbedTextRequest.__lt__": true, + "google.generativeai.protos.EmbedTextRequest.__ne__": true, + "google.generativeai.protos.EmbedTextRequest.__new__": true, + 
"google.generativeai.protos.EmbedTextRequest.__or__": true, + "google.generativeai.protos.EmbedTextRequest.__ror__": true, + "google.generativeai.protos.EmbedTextRequest.copy_from": true, + "google.generativeai.protos.EmbedTextRequest.deserialize": true, + "google.generativeai.protos.EmbedTextRequest.from_json": true, + "google.generativeai.protos.EmbedTextRequest.model": true, + "google.generativeai.protos.EmbedTextRequest.mro": true, + "google.generativeai.protos.EmbedTextRequest.pb": true, + "google.generativeai.protos.EmbedTextRequest.serialize": true, + "google.generativeai.protos.EmbedTextRequest.text": true, + "google.generativeai.protos.EmbedTextRequest.to_dict": true, + "google.generativeai.protos.EmbedTextRequest.to_json": true, + "google.generativeai.protos.EmbedTextRequest.wrap": true, + "google.generativeai.protos.EmbedTextResponse": false, + "google.generativeai.protos.EmbedTextResponse.__call__": true, + "google.generativeai.protos.EmbedTextResponse.__eq__": true, + "google.generativeai.protos.EmbedTextResponse.__ge__": true, + "google.generativeai.protos.EmbedTextResponse.__gt__": true, + "google.generativeai.protos.EmbedTextResponse.__init__": true, + "google.generativeai.protos.EmbedTextResponse.__le__": true, + "google.generativeai.protos.EmbedTextResponse.__lt__": true, + "google.generativeai.protos.EmbedTextResponse.__ne__": true, + "google.generativeai.protos.EmbedTextResponse.__new__": true, + "google.generativeai.protos.EmbedTextResponse.__or__": true, + "google.generativeai.protos.EmbedTextResponse.__ror__": true, + "google.generativeai.protos.EmbedTextResponse.copy_from": true, + "google.generativeai.protos.EmbedTextResponse.deserialize": true, + "google.generativeai.protos.EmbedTextResponse.embedding": true, + "google.generativeai.protos.EmbedTextResponse.from_json": true, + "google.generativeai.protos.EmbedTextResponse.mro": true, + "google.generativeai.protos.EmbedTextResponse.pb": true, + 
"google.generativeai.protos.EmbedTextResponse.serialize": true, + "google.generativeai.protos.EmbedTextResponse.to_dict": true, + "google.generativeai.protos.EmbedTextResponse.to_json": true, + "google.generativeai.protos.EmbedTextResponse.wrap": true, + "google.generativeai.protos.Embedding": false, + "google.generativeai.protos.Embedding.__call__": true, + "google.generativeai.protos.Embedding.__eq__": true, + "google.generativeai.protos.Embedding.__ge__": true, + "google.generativeai.protos.Embedding.__gt__": true, + "google.generativeai.protos.Embedding.__init__": true, + "google.generativeai.protos.Embedding.__le__": true, + "google.generativeai.protos.Embedding.__lt__": true, + "google.generativeai.protos.Embedding.__ne__": true, + "google.generativeai.protos.Embedding.__new__": true, + "google.generativeai.protos.Embedding.__or__": true, + "google.generativeai.protos.Embedding.__ror__": true, + "google.generativeai.protos.Embedding.copy_from": true, + "google.generativeai.protos.Embedding.deserialize": true, + "google.generativeai.protos.Embedding.from_json": true, + "google.generativeai.protos.Embedding.mro": true, + "google.generativeai.protos.Embedding.pb": true, + "google.generativeai.protos.Embedding.serialize": true, + "google.generativeai.protos.Embedding.to_dict": true, + "google.generativeai.protos.Embedding.to_json": true, + "google.generativeai.protos.Embedding.value": true, + "google.generativeai.protos.Embedding.wrap": true, + "google.generativeai.protos.Example": false, + "google.generativeai.protos.Example.__call__": true, + "google.generativeai.protos.Example.__eq__": true, + "google.generativeai.protos.Example.__ge__": true, + "google.generativeai.protos.Example.__gt__": true, + "google.generativeai.protos.Example.__init__": true, + "google.generativeai.protos.Example.__le__": true, + "google.generativeai.protos.Example.__lt__": true, + "google.generativeai.protos.Example.__ne__": true, + "google.generativeai.protos.Example.__new__": true, + 
"google.generativeai.protos.Example.__or__": true, + "google.generativeai.protos.Example.__ror__": true, + "google.generativeai.protos.Example.copy_from": true, + "google.generativeai.protos.Example.deserialize": true, + "google.generativeai.protos.Example.from_json": true, + "google.generativeai.protos.Example.input": true, + "google.generativeai.protos.Example.mro": true, + "google.generativeai.protos.Example.output": true, + "google.generativeai.protos.Example.pb": true, + "google.generativeai.protos.Example.serialize": true, + "google.generativeai.protos.Example.to_dict": true, + "google.generativeai.protos.Example.to_json": true, + "google.generativeai.protos.Example.wrap": true, + "google.generativeai.protos.ExecutableCode": false, + "google.generativeai.protos.ExecutableCode.Language": false, + "google.generativeai.protos.ExecutableCode.Language.LANGUAGE_UNSPECIFIED": true, + "google.generativeai.protos.ExecutableCode.Language.PYTHON": true, + "google.generativeai.protos.ExecutableCode.Language.__abs__": true, + "google.generativeai.protos.ExecutableCode.Language.__add__": true, + "google.generativeai.protos.ExecutableCode.Language.__and__": true, + "google.generativeai.protos.ExecutableCode.Language.__bool__": true, + "google.generativeai.protos.ExecutableCode.Language.__contains__": true, + "google.generativeai.protos.ExecutableCode.Language.__eq__": true, + "google.generativeai.protos.ExecutableCode.Language.__floordiv__": true, + "google.generativeai.protos.ExecutableCode.Language.__ge__": true, + "google.generativeai.protos.ExecutableCode.Language.__getitem__": true, + "google.generativeai.protos.ExecutableCode.Language.__gt__": true, + "google.generativeai.protos.ExecutableCode.Language.__init__": true, + "google.generativeai.protos.ExecutableCode.Language.__invert__": true, + "google.generativeai.protos.ExecutableCode.Language.__iter__": true, + "google.generativeai.protos.ExecutableCode.Language.__le__": true, + 
"google.generativeai.protos.ExecutableCode.Language.__len__": true, + "google.generativeai.protos.ExecutableCode.Language.__lshift__": true, + "google.generativeai.protos.ExecutableCode.Language.__lt__": true, + "google.generativeai.protos.ExecutableCode.Language.__mod__": true, + "google.generativeai.protos.ExecutableCode.Language.__mul__": true, + "google.generativeai.protos.ExecutableCode.Language.__ne__": true, + "google.generativeai.protos.ExecutableCode.Language.__neg__": true, + "google.generativeai.protos.ExecutableCode.Language.__new__": true, + "google.generativeai.protos.ExecutableCode.Language.__or__": true, + "google.generativeai.protos.ExecutableCode.Language.__pos__": true, + "google.generativeai.protos.ExecutableCode.Language.__pow__": true, + "google.generativeai.protos.ExecutableCode.Language.__radd__": true, + "google.generativeai.protos.ExecutableCode.Language.__rand__": true, + "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": true, + "google.generativeai.protos.ExecutableCode.Language.__rlshift__": true, + "google.generativeai.protos.ExecutableCode.Language.__rmod__": true, + "google.generativeai.protos.ExecutableCode.Language.__rmul__": true, + "google.generativeai.protos.ExecutableCode.Language.__ror__": true, + "google.generativeai.protos.ExecutableCode.Language.__rpow__": true, + "google.generativeai.protos.ExecutableCode.Language.__rrshift__": true, + "google.generativeai.protos.ExecutableCode.Language.__rshift__": true, + "google.generativeai.protos.ExecutableCode.Language.__rsub__": true, + "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": true, + "google.generativeai.protos.ExecutableCode.Language.__rxor__": true, + "google.generativeai.protos.ExecutableCode.Language.__sub__": true, + "google.generativeai.protos.ExecutableCode.Language.__truediv__": true, + "google.generativeai.protos.ExecutableCode.Language.__xor__": true, + "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": true, 
+ "google.generativeai.protos.ExecutableCode.Language.bit_count": true, + "google.generativeai.protos.ExecutableCode.Language.bit_length": true, + "google.generativeai.protos.ExecutableCode.Language.conjugate": true, + "google.generativeai.protos.ExecutableCode.Language.denominator": true, + "google.generativeai.protos.ExecutableCode.Language.from_bytes": true, + "google.generativeai.protos.ExecutableCode.Language.imag": true, + "google.generativeai.protos.ExecutableCode.Language.numerator": true, + "google.generativeai.protos.ExecutableCode.Language.real": true, + "google.generativeai.protos.ExecutableCode.Language.to_bytes": true, + "google.generativeai.protos.ExecutableCode.__call__": true, + "google.generativeai.protos.ExecutableCode.__eq__": true, + "google.generativeai.protos.ExecutableCode.__ge__": true, + "google.generativeai.protos.ExecutableCode.__gt__": true, + "google.generativeai.protos.ExecutableCode.__init__": true, + "google.generativeai.protos.ExecutableCode.__le__": true, + "google.generativeai.protos.ExecutableCode.__lt__": true, + "google.generativeai.protos.ExecutableCode.__ne__": true, + "google.generativeai.protos.ExecutableCode.__new__": true, + "google.generativeai.protos.ExecutableCode.__or__": true, + "google.generativeai.protos.ExecutableCode.__ror__": true, + "google.generativeai.protos.ExecutableCode.code": true, + "google.generativeai.protos.ExecutableCode.copy_from": true, + "google.generativeai.protos.ExecutableCode.deserialize": true, + "google.generativeai.protos.ExecutableCode.from_json": true, + "google.generativeai.protos.ExecutableCode.language": true, + "google.generativeai.protos.ExecutableCode.mro": true, + "google.generativeai.protos.ExecutableCode.pb": true, + "google.generativeai.protos.ExecutableCode.serialize": true, + "google.generativeai.protos.ExecutableCode.to_dict": true, + "google.generativeai.protos.ExecutableCode.to_json": true, + "google.generativeai.protos.ExecutableCode.wrap": true, + 
"google.generativeai.protos.File": false, + "google.generativeai.protos.File.State": false, + "google.generativeai.protos.File.State.ACTIVE": true, + "google.generativeai.protos.File.State.FAILED": true, + "google.generativeai.protos.File.State.PROCESSING": true, + "google.generativeai.protos.File.State.STATE_UNSPECIFIED": true, + "google.generativeai.protos.File.State.__abs__": true, + "google.generativeai.protos.File.State.__add__": true, + "google.generativeai.protos.File.State.__and__": true, + "google.generativeai.protos.File.State.__bool__": true, + "google.generativeai.protos.File.State.__contains__": true, + "google.generativeai.protos.File.State.__eq__": true, + "google.generativeai.protos.File.State.__floordiv__": true, + "google.generativeai.protos.File.State.__ge__": true, + "google.generativeai.protos.File.State.__getitem__": true, + "google.generativeai.protos.File.State.__gt__": true, + "google.generativeai.protos.File.State.__init__": true, + "google.generativeai.protos.File.State.__invert__": true, + "google.generativeai.protos.File.State.__iter__": true, + "google.generativeai.protos.File.State.__le__": true, + "google.generativeai.protos.File.State.__len__": true, + "google.generativeai.protos.File.State.__lshift__": true, + "google.generativeai.protos.File.State.__lt__": true, + "google.generativeai.protos.File.State.__mod__": true, + "google.generativeai.protos.File.State.__mul__": true, + "google.generativeai.protos.File.State.__ne__": true, + "google.generativeai.protos.File.State.__neg__": true, + "google.generativeai.protos.File.State.__new__": true, + "google.generativeai.protos.File.State.__or__": true, + "google.generativeai.protos.File.State.__pos__": true, + "google.generativeai.protos.File.State.__pow__": true, + "google.generativeai.protos.File.State.__radd__": true, + "google.generativeai.protos.File.State.__rand__": true, + "google.generativeai.protos.File.State.__rfloordiv__": true, + 
"google.generativeai.protos.File.State.__rlshift__": true, + "google.generativeai.protos.File.State.__rmod__": true, + "google.generativeai.protos.File.State.__rmul__": true, + "google.generativeai.protos.File.State.__ror__": true, + "google.generativeai.protos.File.State.__rpow__": true, + "google.generativeai.protos.File.State.__rrshift__": true, + "google.generativeai.protos.File.State.__rshift__": true, + "google.generativeai.protos.File.State.__rsub__": true, + "google.generativeai.protos.File.State.__rtruediv__": true, + "google.generativeai.protos.File.State.__rxor__": true, + "google.generativeai.protos.File.State.__sub__": true, + "google.generativeai.protos.File.State.__truediv__": true, + "google.generativeai.protos.File.State.__xor__": true, + "google.generativeai.protos.File.State.as_integer_ratio": true, + "google.generativeai.protos.File.State.bit_count": true, + "google.generativeai.protos.File.State.bit_length": true, + "google.generativeai.protos.File.State.conjugate": true, + "google.generativeai.protos.File.State.denominator": true, + "google.generativeai.protos.File.State.from_bytes": true, + "google.generativeai.protos.File.State.imag": true, + "google.generativeai.protos.File.State.numerator": true, + "google.generativeai.protos.File.State.real": true, + "google.generativeai.protos.File.State.to_bytes": true, + "google.generativeai.protos.File.__call__": true, + "google.generativeai.protos.File.__eq__": true, + "google.generativeai.protos.File.__ge__": true, + "google.generativeai.protos.File.__gt__": true, + "google.generativeai.protos.File.__init__": true, + "google.generativeai.protos.File.__le__": true, + "google.generativeai.protos.File.__lt__": true, + "google.generativeai.protos.File.__ne__": true, + "google.generativeai.protos.File.__new__": true, + "google.generativeai.protos.File.__or__": true, + "google.generativeai.protos.File.__ror__": true, + "google.generativeai.protos.File.copy_from": true, + 
"google.generativeai.protos.File.create_time": true, + "google.generativeai.protos.File.deserialize": true, + "google.generativeai.protos.File.display_name": true, + "google.generativeai.protos.File.error": true, + "google.generativeai.protos.File.expiration_time": true, + "google.generativeai.protos.File.from_json": true, + "google.generativeai.protos.File.mime_type": true, + "google.generativeai.protos.File.mro": true, + "google.generativeai.protos.File.name": true, + "google.generativeai.protos.File.pb": true, + "google.generativeai.protos.File.serialize": true, + "google.generativeai.protos.File.sha256_hash": true, + "google.generativeai.protos.File.size_bytes": true, + "google.generativeai.protos.File.state": true, + "google.generativeai.protos.File.to_dict": true, + "google.generativeai.protos.File.to_json": true, + "google.generativeai.protos.File.update_time": true, + "google.generativeai.protos.File.uri": true, + "google.generativeai.protos.File.video_metadata": true, + "google.generativeai.protos.File.wrap": true, + "google.generativeai.protos.FileData": false, + "google.generativeai.protos.FileData.__call__": true, + "google.generativeai.protos.FileData.__eq__": true, + "google.generativeai.protos.FileData.__ge__": true, + "google.generativeai.protos.FileData.__gt__": true, + "google.generativeai.protos.FileData.__init__": true, + "google.generativeai.protos.FileData.__le__": true, + "google.generativeai.protos.FileData.__lt__": true, + "google.generativeai.protos.FileData.__ne__": true, + "google.generativeai.protos.FileData.__new__": true, + "google.generativeai.protos.FileData.__or__": true, + "google.generativeai.protos.FileData.__ror__": true, + "google.generativeai.protos.FileData.copy_from": true, + "google.generativeai.protos.FileData.deserialize": true, + "google.generativeai.protos.FileData.file_uri": true, + "google.generativeai.protos.FileData.from_json": true, + "google.generativeai.protos.FileData.mime_type": true, + 
"google.generativeai.protos.FileData.mro": true, + "google.generativeai.protos.FileData.pb": true, + "google.generativeai.protos.FileData.serialize": true, + "google.generativeai.protos.FileData.to_dict": true, + "google.generativeai.protos.FileData.to_json": true, + "google.generativeai.protos.FileData.wrap": true, + "google.generativeai.protos.FunctionCall": false, + "google.generativeai.protos.FunctionCall.__call__": true, + "google.generativeai.protos.FunctionCall.__eq__": true, + "google.generativeai.protos.FunctionCall.__ge__": true, + "google.generativeai.protos.FunctionCall.__gt__": true, + "google.generativeai.protos.FunctionCall.__init__": true, + "google.generativeai.protos.FunctionCall.__le__": true, + "google.generativeai.protos.FunctionCall.__lt__": true, + "google.generativeai.protos.FunctionCall.__ne__": true, + "google.generativeai.protos.FunctionCall.__new__": true, + "google.generativeai.protos.FunctionCall.__or__": true, + "google.generativeai.protos.FunctionCall.__ror__": true, + "google.generativeai.protos.FunctionCall.args": true, + "google.generativeai.protos.FunctionCall.copy_from": true, + "google.generativeai.protos.FunctionCall.deserialize": true, + "google.generativeai.protos.FunctionCall.from_json": true, + "google.generativeai.protos.FunctionCall.mro": true, + "google.generativeai.protos.FunctionCall.name": true, + "google.generativeai.protos.FunctionCall.pb": true, + "google.generativeai.protos.FunctionCall.serialize": true, + "google.generativeai.protos.FunctionCall.to_dict": true, + "google.generativeai.protos.FunctionCall.to_json": true, + "google.generativeai.protos.FunctionCall.wrap": true, + "google.generativeai.protos.FunctionCallingConfig": false, + "google.generativeai.protos.FunctionCallingConfig.Mode": false, + "google.generativeai.protos.FunctionCallingConfig.Mode.ANY": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.AUTO": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.MODE_UNSPECIFIED": 
true, + "google.generativeai.protos.FunctionCallingConfig.Mode.NONE": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": true, + 
"google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.from_bytes": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.imag": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.real": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": true, + "google.generativeai.protos.FunctionCallingConfig.__call__": true, + 
"google.generativeai.protos.FunctionCallingConfig.__eq__": true, + "google.generativeai.protos.FunctionCallingConfig.__ge__": true, + "google.generativeai.protos.FunctionCallingConfig.__gt__": true, + "google.generativeai.protos.FunctionCallingConfig.__init__": true, + "google.generativeai.protos.FunctionCallingConfig.__le__": true, + "google.generativeai.protos.FunctionCallingConfig.__lt__": true, + "google.generativeai.protos.FunctionCallingConfig.__ne__": true, + "google.generativeai.protos.FunctionCallingConfig.__new__": true, + "google.generativeai.protos.FunctionCallingConfig.__or__": true, + "google.generativeai.protos.FunctionCallingConfig.__ror__": true, + "google.generativeai.protos.FunctionCallingConfig.allowed_function_names": true, + "google.generativeai.protos.FunctionCallingConfig.copy_from": true, + "google.generativeai.protos.FunctionCallingConfig.deserialize": true, + "google.generativeai.protos.FunctionCallingConfig.from_json": true, + "google.generativeai.protos.FunctionCallingConfig.mode": true, + "google.generativeai.protos.FunctionCallingConfig.mro": true, + "google.generativeai.protos.FunctionCallingConfig.pb": true, + "google.generativeai.protos.FunctionCallingConfig.serialize": true, + "google.generativeai.protos.FunctionCallingConfig.to_dict": true, + "google.generativeai.protos.FunctionCallingConfig.to_json": true, + "google.generativeai.protos.FunctionCallingConfig.wrap": true, + "google.generativeai.protos.FunctionDeclaration": false, + "google.generativeai.protos.FunctionDeclaration.__call__": true, + "google.generativeai.protos.FunctionDeclaration.__eq__": true, + "google.generativeai.protos.FunctionDeclaration.__ge__": true, + "google.generativeai.protos.FunctionDeclaration.__gt__": true, + "google.generativeai.protos.FunctionDeclaration.__init__": true, + "google.generativeai.protos.FunctionDeclaration.__le__": true, + "google.generativeai.protos.FunctionDeclaration.__lt__": true, + 
"google.generativeai.protos.FunctionDeclaration.__ne__": true, + "google.generativeai.protos.FunctionDeclaration.__new__": true, + "google.generativeai.protos.FunctionDeclaration.__or__": true, + "google.generativeai.protos.FunctionDeclaration.__ror__": true, + "google.generativeai.protos.FunctionDeclaration.copy_from": true, + "google.generativeai.protos.FunctionDeclaration.description": true, + "google.generativeai.protos.FunctionDeclaration.deserialize": true, + "google.generativeai.protos.FunctionDeclaration.from_json": true, + "google.generativeai.protos.FunctionDeclaration.mro": true, + "google.generativeai.protos.FunctionDeclaration.name": true, + "google.generativeai.protos.FunctionDeclaration.parameters": true, + "google.generativeai.protos.FunctionDeclaration.pb": true, + "google.generativeai.protos.FunctionDeclaration.serialize": true, + "google.generativeai.protos.FunctionDeclaration.to_dict": true, + "google.generativeai.protos.FunctionDeclaration.to_json": true, + "google.generativeai.protos.FunctionDeclaration.wrap": true, + "google.generativeai.protos.FunctionResponse": false, + "google.generativeai.protos.FunctionResponse.__call__": true, + "google.generativeai.protos.FunctionResponse.__eq__": true, + "google.generativeai.protos.FunctionResponse.__ge__": true, + "google.generativeai.protos.FunctionResponse.__gt__": true, + "google.generativeai.protos.FunctionResponse.__init__": true, + "google.generativeai.protos.FunctionResponse.__le__": true, + "google.generativeai.protos.FunctionResponse.__lt__": true, + "google.generativeai.protos.FunctionResponse.__ne__": true, + "google.generativeai.protos.FunctionResponse.__new__": true, + "google.generativeai.protos.FunctionResponse.__or__": true, + "google.generativeai.protos.FunctionResponse.__ror__": true, + "google.generativeai.protos.FunctionResponse.copy_from": true, + "google.generativeai.protos.FunctionResponse.deserialize": true, + "google.generativeai.protos.FunctionResponse.from_json": true, + 
"google.generativeai.protos.FunctionResponse.mro": true, + "google.generativeai.protos.FunctionResponse.name": true, + "google.generativeai.protos.FunctionResponse.pb": true, + "google.generativeai.protos.FunctionResponse.response": true, + "google.generativeai.protos.FunctionResponse.serialize": true, + "google.generativeai.protos.FunctionResponse.to_dict": true, + "google.generativeai.protos.FunctionResponse.to_json": true, + "google.generativeai.protos.FunctionResponse.wrap": true, + "google.generativeai.protos.GenerateAnswerRequest": false, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": false, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ANSWER_STYLE_UNSPECIFIED": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.VERBOSE": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": true, + 
"google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": true, + 
"google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.from_bytes": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": true, + "google.generativeai.protos.GenerateAnswerRequest.__call__": true, + "google.generativeai.protos.GenerateAnswerRequest.__eq__": true, + "google.generativeai.protos.GenerateAnswerRequest.__ge__": true, + "google.generativeai.protos.GenerateAnswerRequest.__gt__": true, + "google.generativeai.protos.GenerateAnswerRequest.__init__": true, + "google.generativeai.protos.GenerateAnswerRequest.__le__": true, + "google.generativeai.protos.GenerateAnswerRequest.__lt__": true, + "google.generativeai.protos.GenerateAnswerRequest.__ne__": true, + "google.generativeai.protos.GenerateAnswerRequest.__new__": true, + "google.generativeai.protos.GenerateAnswerRequest.__or__": true, + "google.generativeai.protos.GenerateAnswerRequest.__ror__": true, + 
"google.generativeai.protos.GenerateAnswerRequest.answer_style": true, + "google.generativeai.protos.GenerateAnswerRequest.contents": true, + "google.generativeai.protos.GenerateAnswerRequest.copy_from": true, + "google.generativeai.protos.GenerateAnswerRequest.deserialize": true, + "google.generativeai.protos.GenerateAnswerRequest.from_json": true, + "google.generativeai.protos.GenerateAnswerRequest.inline_passages": true, + "google.generativeai.protos.GenerateAnswerRequest.model": true, + "google.generativeai.protos.GenerateAnswerRequest.mro": true, + "google.generativeai.protos.GenerateAnswerRequest.pb": true, + "google.generativeai.protos.GenerateAnswerRequest.safety_settings": true, + "google.generativeai.protos.GenerateAnswerRequest.semantic_retriever": true, + "google.generativeai.protos.GenerateAnswerRequest.serialize": true, + "google.generativeai.protos.GenerateAnswerRequest.temperature": true, + "google.generativeai.protos.GenerateAnswerRequest.to_dict": true, + "google.generativeai.protos.GenerateAnswerRequest.to_json": true, + "google.generativeai.protos.GenerateAnswerRequest.wrap": true, + "google.generativeai.protos.GenerateAnswerResponse": false, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": false, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": false, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.OTHER": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.SAFETY": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": true, + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": true, + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": true, + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.from_bytes": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__call__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__or__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ror__": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.block_reason": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": true, + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.mro": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.safety_ratings": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": true, + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": true, + "google.generativeai.protos.GenerateAnswerResponse.__call__": true, + "google.generativeai.protos.GenerateAnswerResponse.__eq__": true, + "google.generativeai.protos.GenerateAnswerResponse.__ge__": true, + "google.generativeai.protos.GenerateAnswerResponse.__gt__": true, + "google.generativeai.protos.GenerateAnswerResponse.__init__": true, + "google.generativeai.protos.GenerateAnswerResponse.__le__": true, + "google.generativeai.protos.GenerateAnswerResponse.__lt__": true, + "google.generativeai.protos.GenerateAnswerResponse.__ne__": true, + "google.generativeai.protos.GenerateAnswerResponse.__new__": true, + "google.generativeai.protos.GenerateAnswerResponse.__or__": true, + "google.generativeai.protos.GenerateAnswerResponse.__ror__": true, + "google.generativeai.protos.GenerateAnswerResponse.answer": true, + "google.generativeai.protos.GenerateAnswerResponse.answerable_probability": true, + "google.generativeai.protos.GenerateAnswerResponse.copy_from": true, + "google.generativeai.protos.GenerateAnswerResponse.deserialize": true, + "google.generativeai.protos.GenerateAnswerResponse.from_json": true, + "google.generativeai.protos.GenerateAnswerResponse.input_feedback": true, + "google.generativeai.protos.GenerateAnswerResponse.mro": true, + "google.generativeai.protos.GenerateAnswerResponse.pb": true, + 
"google.generativeai.protos.GenerateAnswerResponse.serialize": true, + "google.generativeai.protos.GenerateAnswerResponse.to_dict": true, + "google.generativeai.protos.GenerateAnswerResponse.to_json": true, + "google.generativeai.protos.GenerateAnswerResponse.wrap": true, + "google.generativeai.protos.GenerateContentRequest": false, + "google.generativeai.protos.GenerateContentRequest.__call__": true, + "google.generativeai.protos.GenerateContentRequest.__eq__": true, + "google.generativeai.protos.GenerateContentRequest.__ge__": true, + "google.generativeai.protos.GenerateContentRequest.__gt__": true, + "google.generativeai.protos.GenerateContentRequest.__init__": true, + "google.generativeai.protos.GenerateContentRequest.__le__": true, + "google.generativeai.protos.GenerateContentRequest.__lt__": true, + "google.generativeai.protos.GenerateContentRequest.__ne__": true, + "google.generativeai.protos.GenerateContentRequest.__new__": true, + "google.generativeai.protos.GenerateContentRequest.__or__": true, + "google.generativeai.protos.GenerateContentRequest.__ror__": true, + "google.generativeai.protos.GenerateContentRequest.cached_content": true, + "google.generativeai.protos.GenerateContentRequest.contents": true, + "google.generativeai.protos.GenerateContentRequest.copy_from": true, + "google.generativeai.protos.GenerateContentRequest.deserialize": true, + "google.generativeai.protos.GenerateContentRequest.from_json": true, + "google.generativeai.protos.GenerateContentRequest.generation_config": true, + "google.generativeai.protos.GenerateContentRequest.model": true, + "google.generativeai.protos.GenerateContentRequest.mro": true, + "google.generativeai.protos.GenerateContentRequest.pb": true, + "google.generativeai.protos.GenerateContentRequest.safety_settings": true, + "google.generativeai.protos.GenerateContentRequest.serialize": true, + "google.generativeai.protos.GenerateContentRequest.system_instruction": true, + 
"google.generativeai.protos.GenerateContentRequest.to_dict": true, + "google.generativeai.protos.GenerateContentRequest.to_json": true, + "google.generativeai.protos.GenerateContentRequest.tool_config": true, + "google.generativeai.protos.GenerateContentRequest.tools": true, + "google.generativeai.protos.GenerateContentRequest.wrap": true, + "google.generativeai.protos.GenerateContentResponse": false, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback": false, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": false, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.OTHER": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.SAFETY": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": true, + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": true, + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.from_bytes": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": true, + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.__call__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__or__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ror__": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.block_reason": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.mro": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.safety_ratings": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata": false, + 
"google.generativeai.protos.GenerateContentResponse.UsageMetadata.__call__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__or__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ror__": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.cached_content_token_count": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.candidates_token_count": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.mro": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.prompt_token_count": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": true, + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.total_token_count": true, + 
"google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": true, + "google.generativeai.protos.GenerateContentResponse.__call__": true, + "google.generativeai.protos.GenerateContentResponse.__eq__": true, + "google.generativeai.protos.GenerateContentResponse.__ge__": true, + "google.generativeai.protos.GenerateContentResponse.__gt__": true, + "google.generativeai.protos.GenerateContentResponse.__init__": true, + "google.generativeai.protos.GenerateContentResponse.__le__": true, + "google.generativeai.protos.GenerateContentResponse.__lt__": true, + "google.generativeai.protos.GenerateContentResponse.__ne__": true, + "google.generativeai.protos.GenerateContentResponse.__new__": true, + "google.generativeai.protos.GenerateContentResponse.__or__": true, + "google.generativeai.protos.GenerateContentResponse.__ror__": true, + "google.generativeai.protos.GenerateContentResponse.candidates": true, + "google.generativeai.protos.GenerateContentResponse.copy_from": true, + "google.generativeai.protos.GenerateContentResponse.deserialize": true, + "google.generativeai.protos.GenerateContentResponse.from_json": true, + "google.generativeai.protos.GenerateContentResponse.mro": true, + "google.generativeai.protos.GenerateContentResponse.pb": true, + "google.generativeai.protos.GenerateContentResponse.prompt_feedback": true, + "google.generativeai.protos.GenerateContentResponse.serialize": true, + "google.generativeai.protos.GenerateContentResponse.to_dict": true, + "google.generativeai.protos.GenerateContentResponse.to_json": true, + "google.generativeai.protos.GenerateContentResponse.usage_metadata": true, + "google.generativeai.protos.GenerateContentResponse.wrap": true, + "google.generativeai.protos.GenerateMessageRequest": false, + "google.generativeai.protos.GenerateMessageRequest.__call__": true, + "google.generativeai.protos.GenerateMessageRequest.__eq__": true, + "google.generativeai.protos.GenerateMessageRequest.__ge__": true, + 
"google.generativeai.protos.GenerateMessageRequest.__gt__": true, + "google.generativeai.protos.GenerateMessageRequest.__init__": true, + "google.generativeai.protos.GenerateMessageRequest.__le__": true, + "google.generativeai.protos.GenerateMessageRequest.__lt__": true, + "google.generativeai.protos.GenerateMessageRequest.__ne__": true, + "google.generativeai.protos.GenerateMessageRequest.__new__": true, + "google.generativeai.protos.GenerateMessageRequest.__or__": true, + "google.generativeai.protos.GenerateMessageRequest.__ror__": true, + "google.generativeai.protos.GenerateMessageRequest.candidate_count": true, + "google.generativeai.protos.GenerateMessageRequest.copy_from": true, + "google.generativeai.protos.GenerateMessageRequest.deserialize": true, + "google.generativeai.protos.GenerateMessageRequest.from_json": true, + "google.generativeai.protos.GenerateMessageRequest.model": true, + "google.generativeai.protos.GenerateMessageRequest.mro": true, + "google.generativeai.protos.GenerateMessageRequest.pb": true, + "google.generativeai.protos.GenerateMessageRequest.prompt": true, + "google.generativeai.protos.GenerateMessageRequest.serialize": true, + "google.generativeai.protos.GenerateMessageRequest.temperature": true, + "google.generativeai.protos.GenerateMessageRequest.to_dict": true, + "google.generativeai.protos.GenerateMessageRequest.to_json": true, + "google.generativeai.protos.GenerateMessageRequest.top_k": true, + "google.generativeai.protos.GenerateMessageRequest.top_p": true, + "google.generativeai.protos.GenerateMessageRequest.wrap": true, + "google.generativeai.protos.GenerateMessageResponse": false, + "google.generativeai.protos.GenerateMessageResponse.__call__": true, + "google.generativeai.protos.GenerateMessageResponse.__eq__": true, + "google.generativeai.protos.GenerateMessageResponse.__ge__": true, + "google.generativeai.protos.GenerateMessageResponse.__gt__": true, + "google.generativeai.protos.GenerateMessageResponse.__init__": true, + 
"google.generativeai.protos.GenerateMessageResponse.__le__": true, + "google.generativeai.protos.GenerateMessageResponse.__lt__": true, + "google.generativeai.protos.GenerateMessageResponse.__ne__": true, + "google.generativeai.protos.GenerateMessageResponse.__new__": true, + "google.generativeai.protos.GenerateMessageResponse.__or__": true, + "google.generativeai.protos.GenerateMessageResponse.__ror__": true, + "google.generativeai.protos.GenerateMessageResponse.candidates": true, + "google.generativeai.protos.GenerateMessageResponse.copy_from": true, + "google.generativeai.protos.GenerateMessageResponse.deserialize": true, + "google.generativeai.protos.GenerateMessageResponse.filters": true, + "google.generativeai.protos.GenerateMessageResponse.from_json": true, + "google.generativeai.protos.GenerateMessageResponse.messages": true, + "google.generativeai.protos.GenerateMessageResponse.mro": true, + "google.generativeai.protos.GenerateMessageResponse.pb": true, + "google.generativeai.protos.GenerateMessageResponse.serialize": true, + "google.generativeai.protos.GenerateMessageResponse.to_dict": true, + "google.generativeai.protos.GenerateMessageResponse.to_json": true, + "google.generativeai.protos.GenerateMessageResponse.wrap": true, + "google.generativeai.protos.GenerateTextRequest": false, + "google.generativeai.protos.GenerateTextRequest.__call__": true, + "google.generativeai.protos.GenerateTextRequest.__eq__": true, + "google.generativeai.protos.GenerateTextRequest.__ge__": true, + "google.generativeai.protos.GenerateTextRequest.__gt__": true, + "google.generativeai.protos.GenerateTextRequest.__init__": true, + "google.generativeai.protos.GenerateTextRequest.__le__": true, + "google.generativeai.protos.GenerateTextRequest.__lt__": true, + "google.generativeai.protos.GenerateTextRequest.__ne__": true, + "google.generativeai.protos.GenerateTextRequest.__new__": true, + "google.generativeai.protos.GenerateTextRequest.__or__": true, + 
"google.generativeai.protos.GenerateTextRequest.__ror__": true, + "google.generativeai.protos.GenerateTextRequest.candidate_count": true, + "google.generativeai.protos.GenerateTextRequest.copy_from": true, + "google.generativeai.protos.GenerateTextRequest.deserialize": true, + "google.generativeai.protos.GenerateTextRequest.from_json": true, + "google.generativeai.protos.GenerateTextRequest.max_output_tokens": true, + "google.generativeai.protos.GenerateTextRequest.model": true, + "google.generativeai.protos.GenerateTextRequest.mro": true, + "google.generativeai.protos.GenerateTextRequest.pb": true, + "google.generativeai.protos.GenerateTextRequest.prompt": true, + "google.generativeai.protos.GenerateTextRequest.safety_settings": true, + "google.generativeai.protos.GenerateTextRequest.serialize": true, + "google.generativeai.protos.GenerateTextRequest.stop_sequences": true, + "google.generativeai.protos.GenerateTextRequest.temperature": true, + "google.generativeai.protos.GenerateTextRequest.to_dict": true, + "google.generativeai.protos.GenerateTextRequest.to_json": true, + "google.generativeai.protos.GenerateTextRequest.top_k": true, + "google.generativeai.protos.GenerateTextRequest.top_p": true, + "google.generativeai.protos.GenerateTextRequest.wrap": true, + "google.generativeai.protos.GenerateTextResponse": false, + "google.generativeai.protos.GenerateTextResponse.__call__": true, + "google.generativeai.protos.GenerateTextResponse.__eq__": true, + "google.generativeai.protos.GenerateTextResponse.__ge__": true, + "google.generativeai.protos.GenerateTextResponse.__gt__": true, + "google.generativeai.protos.GenerateTextResponse.__init__": true, + "google.generativeai.protos.GenerateTextResponse.__le__": true, + "google.generativeai.protos.GenerateTextResponse.__lt__": true, + "google.generativeai.protos.GenerateTextResponse.__ne__": true, + "google.generativeai.protos.GenerateTextResponse.__new__": true, + "google.generativeai.protos.GenerateTextResponse.__or__": 
true, + "google.generativeai.protos.GenerateTextResponse.__ror__": true, + "google.generativeai.protos.GenerateTextResponse.candidates": true, + "google.generativeai.protos.GenerateTextResponse.copy_from": true, + "google.generativeai.protos.GenerateTextResponse.deserialize": true, + "google.generativeai.protos.GenerateTextResponse.filters": true, + "google.generativeai.protos.GenerateTextResponse.from_json": true, + "google.generativeai.protos.GenerateTextResponse.mro": true, + "google.generativeai.protos.GenerateTextResponse.pb": true, + "google.generativeai.protos.GenerateTextResponse.safety_feedback": true, + "google.generativeai.protos.GenerateTextResponse.serialize": true, + "google.generativeai.protos.GenerateTextResponse.to_dict": true, + "google.generativeai.protos.GenerateTextResponse.to_json": true, + "google.generativeai.protos.GenerateTextResponse.wrap": true, + "google.generativeai.protos.GenerationConfig": false, + "google.generativeai.protos.GenerationConfig.__call__": true, + "google.generativeai.protos.GenerationConfig.__eq__": true, + "google.generativeai.protos.GenerationConfig.__ge__": true, + "google.generativeai.protos.GenerationConfig.__gt__": true, + "google.generativeai.protos.GenerationConfig.__init__": true, + "google.generativeai.protos.GenerationConfig.__le__": true, + "google.generativeai.protos.GenerationConfig.__lt__": true, + "google.generativeai.protos.GenerationConfig.__ne__": true, + "google.generativeai.protos.GenerationConfig.__new__": true, + "google.generativeai.protos.GenerationConfig.__or__": true, + "google.generativeai.protos.GenerationConfig.__ror__": true, + "google.generativeai.protos.GenerationConfig.candidate_count": true, + "google.generativeai.protos.GenerationConfig.copy_from": true, + "google.generativeai.protos.GenerationConfig.deserialize": true, + "google.generativeai.protos.GenerationConfig.from_json": true, + "google.generativeai.protos.GenerationConfig.max_output_tokens": true, + 
"google.generativeai.protos.GenerationConfig.mro": true, + "google.generativeai.protos.GenerationConfig.pb": true, + "google.generativeai.protos.GenerationConfig.response_mime_type": true, + "google.generativeai.protos.GenerationConfig.response_schema": true, + "google.generativeai.protos.GenerationConfig.serialize": true, + "google.generativeai.protos.GenerationConfig.stop_sequences": true, + "google.generativeai.protos.GenerationConfig.temperature": true, + "google.generativeai.protos.GenerationConfig.to_dict": true, + "google.generativeai.protos.GenerationConfig.to_json": true, + "google.generativeai.protos.GenerationConfig.top_k": true, + "google.generativeai.protos.GenerationConfig.top_p": true, + "google.generativeai.protos.GenerationConfig.wrap": true, + "google.generativeai.protos.GetCachedContentRequest": false, + "google.generativeai.protos.GetCachedContentRequest.__call__": true, + "google.generativeai.protos.GetCachedContentRequest.__eq__": true, + "google.generativeai.protos.GetCachedContentRequest.__ge__": true, + "google.generativeai.protos.GetCachedContentRequest.__gt__": true, + "google.generativeai.protos.GetCachedContentRequest.__init__": true, + "google.generativeai.protos.GetCachedContentRequest.__le__": true, + "google.generativeai.protos.GetCachedContentRequest.__lt__": true, + "google.generativeai.protos.GetCachedContentRequest.__ne__": true, + "google.generativeai.protos.GetCachedContentRequest.__new__": true, + "google.generativeai.protos.GetCachedContentRequest.__or__": true, + "google.generativeai.protos.GetCachedContentRequest.__ror__": true, + "google.generativeai.protos.GetCachedContentRequest.copy_from": true, + "google.generativeai.protos.GetCachedContentRequest.deserialize": true, + "google.generativeai.protos.GetCachedContentRequest.from_json": true, + "google.generativeai.protos.GetCachedContentRequest.mro": true, + "google.generativeai.protos.GetCachedContentRequest.name": true, + 
"google.generativeai.protos.GetCachedContentRequest.pb": true, + "google.generativeai.protos.GetCachedContentRequest.serialize": true, + "google.generativeai.protos.GetCachedContentRequest.to_dict": true, + "google.generativeai.protos.GetCachedContentRequest.to_json": true, + "google.generativeai.protos.GetCachedContentRequest.wrap": true, + "google.generativeai.protos.GetChunkRequest": false, + "google.generativeai.protos.GetChunkRequest.__call__": true, + "google.generativeai.protos.GetChunkRequest.__eq__": true, + "google.generativeai.protos.GetChunkRequest.__ge__": true, + "google.generativeai.protos.GetChunkRequest.__gt__": true, + "google.generativeai.protos.GetChunkRequest.__init__": true, + "google.generativeai.protos.GetChunkRequest.__le__": true, + "google.generativeai.protos.GetChunkRequest.__lt__": true, + "google.generativeai.protos.GetChunkRequest.__ne__": true, + "google.generativeai.protos.GetChunkRequest.__new__": true, + "google.generativeai.protos.GetChunkRequest.__or__": true, + "google.generativeai.protos.GetChunkRequest.__ror__": true, + "google.generativeai.protos.GetChunkRequest.copy_from": true, + "google.generativeai.protos.GetChunkRequest.deserialize": true, + "google.generativeai.protos.GetChunkRequest.from_json": true, + "google.generativeai.protos.GetChunkRequest.mro": true, + "google.generativeai.protos.GetChunkRequest.name": true, + "google.generativeai.protos.GetChunkRequest.pb": true, + "google.generativeai.protos.GetChunkRequest.serialize": true, + "google.generativeai.protos.GetChunkRequest.to_dict": true, + "google.generativeai.protos.GetChunkRequest.to_json": true, + "google.generativeai.protos.GetChunkRequest.wrap": true, + "google.generativeai.protos.GetCorpusRequest": false, + "google.generativeai.protos.GetCorpusRequest.__call__": true, + "google.generativeai.protos.GetCorpusRequest.__eq__": true, + "google.generativeai.protos.GetCorpusRequest.__ge__": true, + "google.generativeai.protos.GetCorpusRequest.__gt__": true, + 
"google.generativeai.protos.GetCorpusRequest.__init__": true, + "google.generativeai.protos.GetCorpusRequest.__le__": true, + "google.generativeai.protos.GetCorpusRequest.__lt__": true, + "google.generativeai.protos.GetCorpusRequest.__ne__": true, + "google.generativeai.protos.GetCorpusRequest.__new__": true, + "google.generativeai.protos.GetCorpusRequest.__or__": true, + "google.generativeai.protos.GetCorpusRequest.__ror__": true, + "google.generativeai.protos.GetCorpusRequest.copy_from": true, + "google.generativeai.protos.GetCorpusRequest.deserialize": true, + "google.generativeai.protos.GetCorpusRequest.from_json": true, + "google.generativeai.protos.GetCorpusRequest.mro": true, + "google.generativeai.protos.GetCorpusRequest.name": true, + "google.generativeai.protos.GetCorpusRequest.pb": true, + "google.generativeai.protos.GetCorpusRequest.serialize": true, + "google.generativeai.protos.GetCorpusRequest.to_dict": true, + "google.generativeai.protos.GetCorpusRequest.to_json": true, + "google.generativeai.protos.GetCorpusRequest.wrap": true, + "google.generativeai.protos.GetDocumentRequest": false, + "google.generativeai.protos.GetDocumentRequest.__call__": true, + "google.generativeai.protos.GetDocumentRequest.__eq__": true, + "google.generativeai.protos.GetDocumentRequest.__ge__": true, + "google.generativeai.protos.GetDocumentRequest.__gt__": true, + "google.generativeai.protos.GetDocumentRequest.__init__": true, + "google.generativeai.protos.GetDocumentRequest.__le__": true, + "google.generativeai.protos.GetDocumentRequest.__lt__": true, + "google.generativeai.protos.GetDocumentRequest.__ne__": true, + "google.generativeai.protos.GetDocumentRequest.__new__": true, + "google.generativeai.protos.GetDocumentRequest.__or__": true, + "google.generativeai.protos.GetDocumentRequest.__ror__": true, + "google.generativeai.protos.GetDocumentRequest.copy_from": true, + "google.generativeai.protos.GetDocumentRequest.deserialize": true, + 
"google.generativeai.protos.GetDocumentRequest.from_json": true, + "google.generativeai.protos.GetDocumentRequest.mro": true, + "google.generativeai.protos.GetDocumentRequest.name": true, + "google.generativeai.protos.GetDocumentRequest.pb": true, + "google.generativeai.protos.GetDocumentRequest.serialize": true, + "google.generativeai.protos.GetDocumentRequest.to_dict": true, + "google.generativeai.protos.GetDocumentRequest.to_json": true, + "google.generativeai.protos.GetDocumentRequest.wrap": true, + "google.generativeai.protos.GetFileRequest": false, + "google.generativeai.protos.GetFileRequest.__call__": true, + "google.generativeai.protos.GetFileRequest.__eq__": true, + "google.generativeai.protos.GetFileRequest.__ge__": true, + "google.generativeai.protos.GetFileRequest.__gt__": true, + "google.generativeai.protos.GetFileRequest.__init__": true, + "google.generativeai.protos.GetFileRequest.__le__": true, + "google.generativeai.protos.GetFileRequest.__lt__": true, + "google.generativeai.protos.GetFileRequest.__ne__": true, + "google.generativeai.protos.GetFileRequest.__new__": true, + "google.generativeai.protos.GetFileRequest.__or__": true, + "google.generativeai.protos.GetFileRequest.__ror__": true, + "google.generativeai.protos.GetFileRequest.copy_from": true, + "google.generativeai.protos.GetFileRequest.deserialize": true, + "google.generativeai.protos.GetFileRequest.from_json": true, + "google.generativeai.protos.GetFileRequest.mro": true, + "google.generativeai.protos.GetFileRequest.name": true, + "google.generativeai.protos.GetFileRequest.pb": true, + "google.generativeai.protos.GetFileRequest.serialize": true, + "google.generativeai.protos.GetFileRequest.to_dict": true, + "google.generativeai.protos.GetFileRequest.to_json": true, + "google.generativeai.protos.GetFileRequest.wrap": true, + "google.generativeai.protos.GetModelRequest": false, + "google.generativeai.protos.GetModelRequest.__call__": true, + 
"google.generativeai.protos.GetModelRequest.__eq__": true, + "google.generativeai.protos.GetModelRequest.__ge__": true, + "google.generativeai.protos.GetModelRequest.__gt__": true, + "google.generativeai.protos.GetModelRequest.__init__": true, + "google.generativeai.protos.GetModelRequest.__le__": true, + "google.generativeai.protos.GetModelRequest.__lt__": true, + "google.generativeai.protos.GetModelRequest.__ne__": true, + "google.generativeai.protos.GetModelRequest.__new__": true, + "google.generativeai.protos.GetModelRequest.__or__": true, + "google.generativeai.protos.GetModelRequest.__ror__": true, + "google.generativeai.protos.GetModelRequest.copy_from": true, + "google.generativeai.protos.GetModelRequest.deserialize": true, + "google.generativeai.protos.GetModelRequest.from_json": true, + "google.generativeai.protos.GetModelRequest.mro": true, + "google.generativeai.protos.GetModelRequest.name": true, + "google.generativeai.protos.GetModelRequest.pb": true, + "google.generativeai.protos.GetModelRequest.serialize": true, + "google.generativeai.protos.GetModelRequest.to_dict": true, + "google.generativeai.protos.GetModelRequest.to_json": true, + "google.generativeai.protos.GetModelRequest.wrap": true, + "google.generativeai.protos.GetPermissionRequest": false, + "google.generativeai.protos.GetPermissionRequest.__call__": true, + "google.generativeai.protos.GetPermissionRequest.__eq__": true, + "google.generativeai.protos.GetPermissionRequest.__ge__": true, + "google.generativeai.protos.GetPermissionRequest.__gt__": true, + "google.generativeai.protos.GetPermissionRequest.__init__": true, + "google.generativeai.protos.GetPermissionRequest.__le__": true, + "google.generativeai.protos.GetPermissionRequest.__lt__": true, + "google.generativeai.protos.GetPermissionRequest.__ne__": true, + "google.generativeai.protos.GetPermissionRequest.__new__": true, + "google.generativeai.protos.GetPermissionRequest.__or__": true, + 
"google.generativeai.protos.GetPermissionRequest.__ror__": true, + "google.generativeai.protos.GetPermissionRequest.copy_from": true, + "google.generativeai.protos.GetPermissionRequest.deserialize": true, + "google.generativeai.protos.GetPermissionRequest.from_json": true, + "google.generativeai.protos.GetPermissionRequest.mro": true, + "google.generativeai.protos.GetPermissionRequest.name": true, + "google.generativeai.protos.GetPermissionRequest.pb": true, + "google.generativeai.protos.GetPermissionRequest.serialize": true, + "google.generativeai.protos.GetPermissionRequest.to_dict": true, + "google.generativeai.protos.GetPermissionRequest.to_json": true, + "google.generativeai.protos.GetPermissionRequest.wrap": true, + "google.generativeai.protos.GetTunedModelRequest": false, + "google.generativeai.protos.GetTunedModelRequest.__call__": true, + "google.generativeai.protos.GetTunedModelRequest.__eq__": true, + "google.generativeai.protos.GetTunedModelRequest.__ge__": true, + "google.generativeai.protos.GetTunedModelRequest.__gt__": true, + "google.generativeai.protos.GetTunedModelRequest.__init__": true, + "google.generativeai.protos.GetTunedModelRequest.__le__": true, + "google.generativeai.protos.GetTunedModelRequest.__lt__": true, + "google.generativeai.protos.GetTunedModelRequest.__ne__": true, + "google.generativeai.protos.GetTunedModelRequest.__new__": true, + "google.generativeai.protos.GetTunedModelRequest.__or__": true, + "google.generativeai.protos.GetTunedModelRequest.__ror__": true, + "google.generativeai.protos.GetTunedModelRequest.copy_from": true, + "google.generativeai.protos.GetTunedModelRequest.deserialize": true, + "google.generativeai.protos.GetTunedModelRequest.from_json": true, + "google.generativeai.protos.GetTunedModelRequest.mro": true, + "google.generativeai.protos.GetTunedModelRequest.name": true, + "google.generativeai.protos.GetTunedModelRequest.pb": true, + "google.generativeai.protos.GetTunedModelRequest.serialize": true, + 
"google.generativeai.protos.GetTunedModelRequest.to_dict": true, + "google.generativeai.protos.GetTunedModelRequest.to_json": true, + "google.generativeai.protos.GetTunedModelRequest.wrap": true, + "google.generativeai.protos.GroundingAttribution": false, + "google.generativeai.protos.GroundingAttribution.__call__": true, + "google.generativeai.protos.GroundingAttribution.__eq__": true, + "google.generativeai.protos.GroundingAttribution.__ge__": true, + "google.generativeai.protos.GroundingAttribution.__gt__": true, + "google.generativeai.protos.GroundingAttribution.__init__": true, + "google.generativeai.protos.GroundingAttribution.__le__": true, + "google.generativeai.protos.GroundingAttribution.__lt__": true, + "google.generativeai.protos.GroundingAttribution.__ne__": true, + "google.generativeai.protos.GroundingAttribution.__new__": true, + "google.generativeai.protos.GroundingAttribution.__or__": true, + "google.generativeai.protos.GroundingAttribution.__ror__": true, + "google.generativeai.protos.GroundingAttribution.content": true, + "google.generativeai.protos.GroundingAttribution.copy_from": true, + "google.generativeai.protos.GroundingAttribution.deserialize": true, + "google.generativeai.protos.GroundingAttribution.from_json": true, + "google.generativeai.protos.GroundingAttribution.mro": true, + "google.generativeai.protos.GroundingAttribution.pb": true, + "google.generativeai.protos.GroundingAttribution.serialize": true, + "google.generativeai.protos.GroundingAttribution.source_id": true, + "google.generativeai.protos.GroundingAttribution.to_dict": true, + "google.generativeai.protos.GroundingAttribution.to_json": true, + "google.generativeai.protos.GroundingAttribution.wrap": true, + "google.generativeai.protos.GroundingPassage": false, + "google.generativeai.protos.GroundingPassage.__call__": true, + "google.generativeai.protos.GroundingPassage.__eq__": true, + "google.generativeai.protos.GroundingPassage.__ge__": true, + 
"google.generativeai.protos.GroundingPassage.__gt__": true, + "google.generativeai.protos.GroundingPassage.__init__": true, + "google.generativeai.protos.GroundingPassage.__le__": true, + "google.generativeai.protos.GroundingPassage.__lt__": true, + "google.generativeai.protos.GroundingPassage.__ne__": true, + "google.generativeai.protos.GroundingPassage.__new__": true, + "google.generativeai.protos.GroundingPassage.__or__": true, + "google.generativeai.protos.GroundingPassage.__ror__": true, + "google.generativeai.protos.GroundingPassage.content": true, + "google.generativeai.protos.GroundingPassage.copy_from": true, + "google.generativeai.protos.GroundingPassage.deserialize": true, + "google.generativeai.protos.GroundingPassage.from_json": true, + "google.generativeai.protos.GroundingPassage.id": true, + "google.generativeai.protos.GroundingPassage.mro": true, + "google.generativeai.protos.GroundingPassage.pb": true, + "google.generativeai.protos.GroundingPassage.serialize": true, + "google.generativeai.protos.GroundingPassage.to_dict": true, + "google.generativeai.protos.GroundingPassage.to_json": true, + "google.generativeai.protos.GroundingPassage.wrap": true, + "google.generativeai.protos.GroundingPassages": false, + "google.generativeai.protos.GroundingPassages.__call__": true, + "google.generativeai.protos.GroundingPassages.__eq__": true, + "google.generativeai.protos.GroundingPassages.__ge__": true, + "google.generativeai.protos.GroundingPassages.__gt__": true, + "google.generativeai.protos.GroundingPassages.__init__": true, + "google.generativeai.protos.GroundingPassages.__le__": true, + "google.generativeai.protos.GroundingPassages.__lt__": true, + "google.generativeai.protos.GroundingPassages.__ne__": true, + "google.generativeai.protos.GroundingPassages.__new__": true, + "google.generativeai.protos.GroundingPassages.__or__": true, + "google.generativeai.protos.GroundingPassages.__ror__": true, + "google.generativeai.protos.GroundingPassages.copy_from": 
true, + "google.generativeai.protos.GroundingPassages.deserialize": true, + "google.generativeai.protos.GroundingPassages.from_json": true, + "google.generativeai.protos.GroundingPassages.mro": true, + "google.generativeai.protos.GroundingPassages.passages": true, + "google.generativeai.protos.GroundingPassages.pb": true, + "google.generativeai.protos.GroundingPassages.serialize": true, + "google.generativeai.protos.GroundingPassages.to_dict": true, + "google.generativeai.protos.GroundingPassages.to_json": true, + "google.generativeai.protos.GroundingPassages.wrap": true, + "google.generativeai.protos.HarmCategory": false, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DEROGATORY": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HARASSMENT": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_MEDICAL": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUAL": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_TOXICITY": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_VIOLENCE": true, + "google.generativeai.protos.HarmCategory.__abs__": true, + "google.generativeai.protos.HarmCategory.__add__": true, + "google.generativeai.protos.HarmCategory.__and__": true, + "google.generativeai.protos.HarmCategory.__bool__": true, + "google.generativeai.protos.HarmCategory.__contains__": true, + "google.generativeai.protos.HarmCategory.__eq__": true, + "google.generativeai.protos.HarmCategory.__floordiv__": true, + "google.generativeai.protos.HarmCategory.__ge__": true, + 
"google.generativeai.protos.HarmCategory.__getitem__": true, + "google.generativeai.protos.HarmCategory.__gt__": true, + "google.generativeai.protos.HarmCategory.__init__": true, + "google.generativeai.protos.HarmCategory.__invert__": true, + "google.generativeai.protos.HarmCategory.__iter__": true, + "google.generativeai.protos.HarmCategory.__le__": true, + "google.generativeai.protos.HarmCategory.__len__": true, + "google.generativeai.protos.HarmCategory.__lshift__": true, + "google.generativeai.protos.HarmCategory.__lt__": true, + "google.generativeai.protos.HarmCategory.__mod__": true, + "google.generativeai.protos.HarmCategory.__mul__": true, + "google.generativeai.protos.HarmCategory.__ne__": true, + "google.generativeai.protos.HarmCategory.__neg__": true, + "google.generativeai.protos.HarmCategory.__new__": true, + "google.generativeai.protos.HarmCategory.__or__": true, + "google.generativeai.protos.HarmCategory.__pos__": true, + "google.generativeai.protos.HarmCategory.__pow__": true, + "google.generativeai.protos.HarmCategory.__radd__": true, + "google.generativeai.protos.HarmCategory.__rand__": true, + "google.generativeai.protos.HarmCategory.__rfloordiv__": true, + "google.generativeai.protos.HarmCategory.__rlshift__": true, + "google.generativeai.protos.HarmCategory.__rmod__": true, + "google.generativeai.protos.HarmCategory.__rmul__": true, + "google.generativeai.protos.HarmCategory.__ror__": true, + "google.generativeai.protos.HarmCategory.__rpow__": true, + "google.generativeai.protos.HarmCategory.__rrshift__": true, + "google.generativeai.protos.HarmCategory.__rshift__": true, + "google.generativeai.protos.HarmCategory.__rsub__": true, + "google.generativeai.protos.HarmCategory.__rtruediv__": true, + "google.generativeai.protos.HarmCategory.__rxor__": true, + "google.generativeai.protos.HarmCategory.__sub__": true, + "google.generativeai.protos.HarmCategory.__truediv__": true, + "google.generativeai.protos.HarmCategory.__xor__": true, + 
"google.generativeai.protos.HarmCategory.as_integer_ratio": true, + "google.generativeai.protos.HarmCategory.bit_count": true, + "google.generativeai.protos.HarmCategory.bit_length": true, + "google.generativeai.protos.HarmCategory.conjugate": true, + "google.generativeai.protos.HarmCategory.denominator": true, + "google.generativeai.protos.HarmCategory.from_bytes": true, + "google.generativeai.protos.HarmCategory.imag": true, + "google.generativeai.protos.HarmCategory.numerator": true, + "google.generativeai.protos.HarmCategory.real": true, + "google.generativeai.protos.HarmCategory.to_bytes": true, + "google.generativeai.protos.Hyperparameters": false, + "google.generativeai.protos.Hyperparameters.__call__": true, + "google.generativeai.protos.Hyperparameters.__eq__": true, + "google.generativeai.protos.Hyperparameters.__ge__": true, + "google.generativeai.protos.Hyperparameters.__gt__": true, + "google.generativeai.protos.Hyperparameters.__init__": true, + "google.generativeai.protos.Hyperparameters.__le__": true, + "google.generativeai.protos.Hyperparameters.__lt__": true, + "google.generativeai.protos.Hyperparameters.__ne__": true, + "google.generativeai.protos.Hyperparameters.__new__": true, + "google.generativeai.protos.Hyperparameters.__or__": true, + "google.generativeai.protos.Hyperparameters.__ror__": true, + "google.generativeai.protos.Hyperparameters.batch_size": true, + "google.generativeai.protos.Hyperparameters.copy_from": true, + "google.generativeai.protos.Hyperparameters.deserialize": true, + "google.generativeai.protos.Hyperparameters.epoch_count": true, + "google.generativeai.protos.Hyperparameters.from_json": true, + "google.generativeai.protos.Hyperparameters.learning_rate": true, + "google.generativeai.protos.Hyperparameters.learning_rate_multiplier": true, + "google.generativeai.protos.Hyperparameters.mro": true, + "google.generativeai.protos.Hyperparameters.pb": true, + "google.generativeai.protos.Hyperparameters.serialize": true, + 
"google.generativeai.protos.Hyperparameters.to_dict": true, + "google.generativeai.protos.Hyperparameters.to_json": true, + "google.generativeai.protos.Hyperparameters.wrap": true, + "google.generativeai.protos.ListCachedContentsRequest": false, + "google.generativeai.protos.ListCachedContentsRequest.__call__": true, + "google.generativeai.protos.ListCachedContentsRequest.__eq__": true, + "google.generativeai.protos.ListCachedContentsRequest.__ge__": true, + "google.generativeai.protos.ListCachedContentsRequest.__gt__": true, + "google.generativeai.protos.ListCachedContentsRequest.__init__": true, + "google.generativeai.protos.ListCachedContentsRequest.__le__": true, + "google.generativeai.protos.ListCachedContentsRequest.__lt__": true, + "google.generativeai.protos.ListCachedContentsRequest.__ne__": true, + "google.generativeai.protos.ListCachedContentsRequest.__new__": true, + "google.generativeai.protos.ListCachedContentsRequest.__or__": true, + "google.generativeai.protos.ListCachedContentsRequest.__ror__": true, + "google.generativeai.protos.ListCachedContentsRequest.copy_from": true, + "google.generativeai.protos.ListCachedContentsRequest.deserialize": true, + "google.generativeai.protos.ListCachedContentsRequest.from_json": true, + "google.generativeai.protos.ListCachedContentsRequest.mro": true, + "google.generativeai.protos.ListCachedContentsRequest.page_size": true, + "google.generativeai.protos.ListCachedContentsRequest.page_token": true, + "google.generativeai.protos.ListCachedContentsRequest.pb": true, + "google.generativeai.protos.ListCachedContentsRequest.serialize": true, + "google.generativeai.protos.ListCachedContentsRequest.to_dict": true, + "google.generativeai.protos.ListCachedContentsRequest.to_json": true, + "google.generativeai.protos.ListCachedContentsRequest.wrap": true, + "google.generativeai.protos.ListCachedContentsResponse": false, + "google.generativeai.protos.ListCachedContentsResponse.__call__": true, + 
"google.generativeai.protos.ListCachedContentsResponse.__eq__": true, + "google.generativeai.protos.ListCachedContentsResponse.__ge__": true, + "google.generativeai.protos.ListCachedContentsResponse.__gt__": true, + "google.generativeai.protos.ListCachedContentsResponse.__init__": true, + "google.generativeai.protos.ListCachedContentsResponse.__le__": true, + "google.generativeai.protos.ListCachedContentsResponse.__lt__": true, + "google.generativeai.protos.ListCachedContentsResponse.__ne__": true, + "google.generativeai.protos.ListCachedContentsResponse.__new__": true, + "google.generativeai.protos.ListCachedContentsResponse.__or__": true, + "google.generativeai.protos.ListCachedContentsResponse.__ror__": true, + "google.generativeai.protos.ListCachedContentsResponse.cached_contents": true, + "google.generativeai.protos.ListCachedContentsResponse.copy_from": true, + "google.generativeai.protos.ListCachedContentsResponse.deserialize": true, + "google.generativeai.protos.ListCachedContentsResponse.from_json": true, + "google.generativeai.protos.ListCachedContentsResponse.mro": true, + "google.generativeai.protos.ListCachedContentsResponse.next_page_token": true, + "google.generativeai.protos.ListCachedContentsResponse.pb": true, + "google.generativeai.protos.ListCachedContentsResponse.serialize": true, + "google.generativeai.protos.ListCachedContentsResponse.to_dict": true, + "google.generativeai.protos.ListCachedContentsResponse.to_json": true, + "google.generativeai.protos.ListCachedContentsResponse.wrap": true, + "google.generativeai.protos.ListChunksRequest": false, + "google.generativeai.protos.ListChunksRequest.__call__": true, + "google.generativeai.protos.ListChunksRequest.__eq__": true, + "google.generativeai.protos.ListChunksRequest.__ge__": true, + "google.generativeai.protos.ListChunksRequest.__gt__": true, + "google.generativeai.protos.ListChunksRequest.__init__": true, + "google.generativeai.protos.ListChunksRequest.__le__": true, + 
"google.generativeai.protos.ListChunksRequest.__lt__": true, + "google.generativeai.protos.ListChunksRequest.__ne__": true, + "google.generativeai.protos.ListChunksRequest.__new__": true, + "google.generativeai.protos.ListChunksRequest.__or__": true, + "google.generativeai.protos.ListChunksRequest.__ror__": true, + "google.generativeai.protos.ListChunksRequest.copy_from": true, + "google.generativeai.protos.ListChunksRequest.deserialize": true, + "google.generativeai.protos.ListChunksRequest.from_json": true, + "google.generativeai.protos.ListChunksRequest.mro": true, + "google.generativeai.protos.ListChunksRequest.page_size": true, + "google.generativeai.protos.ListChunksRequest.page_token": true, + "google.generativeai.protos.ListChunksRequest.parent": true, + "google.generativeai.protos.ListChunksRequest.pb": true, + "google.generativeai.protos.ListChunksRequest.serialize": true, + "google.generativeai.protos.ListChunksRequest.to_dict": true, + "google.generativeai.protos.ListChunksRequest.to_json": true, + "google.generativeai.protos.ListChunksRequest.wrap": true, + "google.generativeai.protos.ListChunksResponse": false, + "google.generativeai.protos.ListChunksResponse.__call__": true, + "google.generativeai.protos.ListChunksResponse.__eq__": true, + "google.generativeai.protos.ListChunksResponse.__ge__": true, + "google.generativeai.protos.ListChunksResponse.__gt__": true, + "google.generativeai.protos.ListChunksResponse.__init__": true, + "google.generativeai.protos.ListChunksResponse.__le__": true, + "google.generativeai.protos.ListChunksResponse.__lt__": true, + "google.generativeai.protos.ListChunksResponse.__ne__": true, + "google.generativeai.protos.ListChunksResponse.__new__": true, + "google.generativeai.protos.ListChunksResponse.__or__": true, + "google.generativeai.protos.ListChunksResponse.__ror__": true, + "google.generativeai.protos.ListChunksResponse.chunks": true, + "google.generativeai.protos.ListChunksResponse.copy_from": true, + 
"google.generativeai.protos.ListChunksResponse.deserialize": true, + "google.generativeai.protos.ListChunksResponse.from_json": true, + "google.generativeai.protos.ListChunksResponse.mro": true, + "google.generativeai.protos.ListChunksResponse.next_page_token": true, + "google.generativeai.protos.ListChunksResponse.pb": true, + "google.generativeai.protos.ListChunksResponse.serialize": true, + "google.generativeai.protos.ListChunksResponse.to_dict": true, + "google.generativeai.protos.ListChunksResponse.to_json": true, + "google.generativeai.protos.ListChunksResponse.wrap": true, + "google.generativeai.protos.ListCorporaRequest": false, + "google.generativeai.protos.ListCorporaRequest.__call__": true, + "google.generativeai.protos.ListCorporaRequest.__eq__": true, + "google.generativeai.protos.ListCorporaRequest.__ge__": true, + "google.generativeai.protos.ListCorporaRequest.__gt__": true, + "google.generativeai.protos.ListCorporaRequest.__init__": true, + "google.generativeai.protos.ListCorporaRequest.__le__": true, + "google.generativeai.protos.ListCorporaRequest.__lt__": true, + "google.generativeai.protos.ListCorporaRequest.__ne__": true, + "google.generativeai.protos.ListCorporaRequest.__new__": true, + "google.generativeai.protos.ListCorporaRequest.__or__": true, + "google.generativeai.protos.ListCorporaRequest.__ror__": true, + "google.generativeai.protos.ListCorporaRequest.copy_from": true, + "google.generativeai.protos.ListCorporaRequest.deserialize": true, + "google.generativeai.protos.ListCorporaRequest.from_json": true, + "google.generativeai.protos.ListCorporaRequest.mro": true, + "google.generativeai.protos.ListCorporaRequest.page_size": true, + "google.generativeai.protos.ListCorporaRequest.page_token": true, + "google.generativeai.protos.ListCorporaRequest.pb": true, + "google.generativeai.protos.ListCorporaRequest.serialize": true, + "google.generativeai.protos.ListCorporaRequest.to_dict": true, + 
"google.generativeai.protos.ListCorporaRequest.to_json": true, + "google.generativeai.protos.ListCorporaRequest.wrap": true, + "google.generativeai.protos.ListCorporaResponse": false, + "google.generativeai.protos.ListCorporaResponse.__call__": true, + "google.generativeai.protos.ListCorporaResponse.__eq__": true, + "google.generativeai.protos.ListCorporaResponse.__ge__": true, + "google.generativeai.protos.ListCorporaResponse.__gt__": true, + "google.generativeai.protos.ListCorporaResponse.__init__": true, + "google.generativeai.protos.ListCorporaResponse.__le__": true, + "google.generativeai.protos.ListCorporaResponse.__lt__": true, + "google.generativeai.protos.ListCorporaResponse.__ne__": true, + "google.generativeai.protos.ListCorporaResponse.__new__": true, + "google.generativeai.protos.ListCorporaResponse.__or__": true, + "google.generativeai.protos.ListCorporaResponse.__ror__": true, + "google.generativeai.protos.ListCorporaResponse.copy_from": true, + "google.generativeai.protos.ListCorporaResponse.corpora": true, + "google.generativeai.protos.ListCorporaResponse.deserialize": true, + "google.generativeai.protos.ListCorporaResponse.from_json": true, + "google.generativeai.protos.ListCorporaResponse.mro": true, + "google.generativeai.protos.ListCorporaResponse.next_page_token": true, + "google.generativeai.protos.ListCorporaResponse.pb": true, + "google.generativeai.protos.ListCorporaResponse.serialize": true, + "google.generativeai.protos.ListCorporaResponse.to_dict": true, + "google.generativeai.protos.ListCorporaResponse.to_json": true, + "google.generativeai.protos.ListCorporaResponse.wrap": true, + "google.generativeai.protos.ListDocumentsRequest": false, + "google.generativeai.protos.ListDocumentsRequest.__call__": true, + "google.generativeai.protos.ListDocumentsRequest.__eq__": true, + "google.generativeai.protos.ListDocumentsRequest.__ge__": true, + "google.generativeai.protos.ListDocumentsRequest.__gt__": true, + 
"google.generativeai.protos.ListDocumentsRequest.__init__": true, + "google.generativeai.protos.ListDocumentsRequest.__le__": true, + "google.generativeai.protos.ListDocumentsRequest.__lt__": true, + "google.generativeai.protos.ListDocumentsRequest.__ne__": true, + "google.generativeai.protos.ListDocumentsRequest.__new__": true, + "google.generativeai.protos.ListDocumentsRequest.__or__": true, + "google.generativeai.protos.ListDocumentsRequest.__ror__": true, + "google.generativeai.protos.ListDocumentsRequest.copy_from": true, + "google.generativeai.protos.ListDocumentsRequest.deserialize": true, + "google.generativeai.protos.ListDocumentsRequest.from_json": true, + "google.generativeai.protos.ListDocumentsRequest.mro": true, + "google.generativeai.protos.ListDocumentsRequest.page_size": true, + "google.generativeai.protos.ListDocumentsRequest.page_token": true, + "google.generativeai.protos.ListDocumentsRequest.parent": true, + "google.generativeai.protos.ListDocumentsRequest.pb": true, + "google.generativeai.protos.ListDocumentsRequest.serialize": true, + "google.generativeai.protos.ListDocumentsRequest.to_dict": true, + "google.generativeai.protos.ListDocumentsRequest.to_json": true, + "google.generativeai.protos.ListDocumentsRequest.wrap": true, + "google.generativeai.protos.ListDocumentsResponse": false, + "google.generativeai.protos.ListDocumentsResponse.__call__": true, + "google.generativeai.protos.ListDocumentsResponse.__eq__": true, + "google.generativeai.protos.ListDocumentsResponse.__ge__": true, + "google.generativeai.protos.ListDocumentsResponse.__gt__": true, + "google.generativeai.protos.ListDocumentsResponse.__init__": true, + "google.generativeai.protos.ListDocumentsResponse.__le__": true, + "google.generativeai.protos.ListDocumentsResponse.__lt__": true, + "google.generativeai.protos.ListDocumentsResponse.__ne__": true, + "google.generativeai.protos.ListDocumentsResponse.__new__": true, + "google.generativeai.protos.ListDocumentsResponse.__or__": 
true, + "google.generativeai.protos.ListDocumentsResponse.__ror__": true, + "google.generativeai.protos.ListDocumentsResponse.copy_from": true, + "google.generativeai.protos.ListDocumentsResponse.deserialize": true, + "google.generativeai.protos.ListDocumentsResponse.documents": true, + "google.generativeai.protos.ListDocumentsResponse.from_json": true, + "google.generativeai.protos.ListDocumentsResponse.mro": true, + "google.generativeai.protos.ListDocumentsResponse.next_page_token": true, + "google.generativeai.protos.ListDocumentsResponse.pb": true, + "google.generativeai.protos.ListDocumentsResponse.serialize": true, + "google.generativeai.protos.ListDocumentsResponse.to_dict": true, + "google.generativeai.protos.ListDocumentsResponse.to_json": true, + "google.generativeai.protos.ListDocumentsResponse.wrap": true, + "google.generativeai.protos.ListFilesRequest": false, + "google.generativeai.protos.ListFilesRequest.__call__": true, + "google.generativeai.protos.ListFilesRequest.__eq__": true, + "google.generativeai.protos.ListFilesRequest.__ge__": true, + "google.generativeai.protos.ListFilesRequest.__gt__": true, + "google.generativeai.protos.ListFilesRequest.__init__": true, + "google.generativeai.protos.ListFilesRequest.__le__": true, + "google.generativeai.protos.ListFilesRequest.__lt__": true, + "google.generativeai.protos.ListFilesRequest.__ne__": true, + "google.generativeai.protos.ListFilesRequest.__new__": true, + "google.generativeai.protos.ListFilesRequest.__or__": true, + "google.generativeai.protos.ListFilesRequest.__ror__": true, + "google.generativeai.protos.ListFilesRequest.copy_from": true, + "google.generativeai.protos.ListFilesRequest.deserialize": true, + "google.generativeai.protos.ListFilesRequest.from_json": true, + "google.generativeai.protos.ListFilesRequest.mro": true, + "google.generativeai.protos.ListFilesRequest.page_size": true, + "google.generativeai.protos.ListFilesRequest.page_token": true, + 
"google.generativeai.protos.ListFilesRequest.pb": true, + "google.generativeai.protos.ListFilesRequest.serialize": true, + "google.generativeai.protos.ListFilesRequest.to_dict": true, + "google.generativeai.protos.ListFilesRequest.to_json": true, + "google.generativeai.protos.ListFilesRequest.wrap": true, + "google.generativeai.protos.ListFilesResponse": false, + "google.generativeai.protos.ListFilesResponse.__call__": true, + "google.generativeai.protos.ListFilesResponse.__eq__": true, + "google.generativeai.protos.ListFilesResponse.__ge__": true, + "google.generativeai.protos.ListFilesResponse.__gt__": true, + "google.generativeai.protos.ListFilesResponse.__init__": true, + "google.generativeai.protos.ListFilesResponse.__le__": true, + "google.generativeai.protos.ListFilesResponse.__lt__": true, + "google.generativeai.protos.ListFilesResponse.__ne__": true, + "google.generativeai.protos.ListFilesResponse.__new__": true, + "google.generativeai.protos.ListFilesResponse.__or__": true, + "google.generativeai.protos.ListFilesResponse.__ror__": true, + "google.generativeai.protos.ListFilesResponse.copy_from": true, + "google.generativeai.protos.ListFilesResponse.deserialize": true, + "google.generativeai.protos.ListFilesResponse.files": true, + "google.generativeai.protos.ListFilesResponse.from_json": true, + "google.generativeai.protos.ListFilesResponse.mro": true, + "google.generativeai.protos.ListFilesResponse.next_page_token": true, + "google.generativeai.protos.ListFilesResponse.pb": true, + "google.generativeai.protos.ListFilesResponse.serialize": true, + "google.generativeai.protos.ListFilesResponse.to_dict": true, + "google.generativeai.protos.ListFilesResponse.to_json": true, + "google.generativeai.protos.ListFilesResponse.wrap": true, + "google.generativeai.protos.ListModelsRequest": false, + "google.generativeai.protos.ListModelsRequest.__call__": true, + "google.generativeai.protos.ListModelsRequest.__eq__": true, + 
"google.generativeai.protos.ListModelsRequest.__ge__": true, + "google.generativeai.protos.ListModelsRequest.__gt__": true, + "google.generativeai.protos.ListModelsRequest.__init__": true, + "google.generativeai.protos.ListModelsRequest.__le__": true, + "google.generativeai.protos.ListModelsRequest.__lt__": true, + "google.generativeai.protos.ListModelsRequest.__ne__": true, + "google.generativeai.protos.ListModelsRequest.__new__": true, + "google.generativeai.protos.ListModelsRequest.__or__": true, + "google.generativeai.protos.ListModelsRequest.__ror__": true, + "google.generativeai.protos.ListModelsRequest.copy_from": true, + "google.generativeai.protos.ListModelsRequest.deserialize": true, + "google.generativeai.protos.ListModelsRequest.from_json": true, + "google.generativeai.protos.ListModelsRequest.mro": true, + "google.generativeai.protos.ListModelsRequest.page_size": true, + "google.generativeai.protos.ListModelsRequest.page_token": true, + "google.generativeai.protos.ListModelsRequest.pb": true, + "google.generativeai.protos.ListModelsRequest.serialize": true, + "google.generativeai.protos.ListModelsRequest.to_dict": true, + "google.generativeai.protos.ListModelsRequest.to_json": true, + "google.generativeai.protos.ListModelsRequest.wrap": true, + "google.generativeai.protos.ListModelsResponse": false, + "google.generativeai.protos.ListModelsResponse.__call__": true, + "google.generativeai.protos.ListModelsResponse.__eq__": true, + "google.generativeai.protos.ListModelsResponse.__ge__": true, + "google.generativeai.protos.ListModelsResponse.__gt__": true, + "google.generativeai.protos.ListModelsResponse.__init__": true, + "google.generativeai.protos.ListModelsResponse.__le__": true, + "google.generativeai.protos.ListModelsResponse.__lt__": true, + "google.generativeai.protos.ListModelsResponse.__ne__": true, + "google.generativeai.protos.ListModelsResponse.__new__": true, + "google.generativeai.protos.ListModelsResponse.__or__": true, + 
"google.generativeai.protos.ListModelsResponse.__ror__": true, + "google.generativeai.protos.ListModelsResponse.copy_from": true, + "google.generativeai.protos.ListModelsResponse.deserialize": true, + "google.generativeai.protos.ListModelsResponse.from_json": true, + "google.generativeai.protos.ListModelsResponse.models": true, + "google.generativeai.protos.ListModelsResponse.mro": true, + "google.generativeai.protos.ListModelsResponse.next_page_token": true, + "google.generativeai.protos.ListModelsResponse.pb": true, + "google.generativeai.protos.ListModelsResponse.serialize": true, + "google.generativeai.protos.ListModelsResponse.to_dict": true, + "google.generativeai.protos.ListModelsResponse.to_json": true, + "google.generativeai.protos.ListModelsResponse.wrap": true, + "google.generativeai.protos.ListPermissionsRequest": false, + "google.generativeai.protos.ListPermissionsRequest.__call__": true, + "google.generativeai.protos.ListPermissionsRequest.__eq__": true, + "google.generativeai.protos.ListPermissionsRequest.__ge__": true, + "google.generativeai.protos.ListPermissionsRequest.__gt__": true, + "google.generativeai.protos.ListPermissionsRequest.__init__": true, + "google.generativeai.protos.ListPermissionsRequest.__le__": true, + "google.generativeai.protos.ListPermissionsRequest.__lt__": true, + "google.generativeai.protos.ListPermissionsRequest.__ne__": true, + "google.generativeai.protos.ListPermissionsRequest.__new__": true, + "google.generativeai.protos.ListPermissionsRequest.__or__": true, + "google.generativeai.protos.ListPermissionsRequest.__ror__": true, + "google.generativeai.protos.ListPermissionsRequest.copy_from": true, + "google.generativeai.protos.ListPermissionsRequest.deserialize": true, + "google.generativeai.protos.ListPermissionsRequest.from_json": true, + "google.generativeai.protos.ListPermissionsRequest.mro": true, + "google.generativeai.protos.ListPermissionsRequest.page_size": true, + 
"google.generativeai.protos.ListPermissionsRequest.page_token": true, + "google.generativeai.protos.ListPermissionsRequest.parent": true, + "google.generativeai.protos.ListPermissionsRequest.pb": true, + "google.generativeai.protos.ListPermissionsRequest.serialize": true, + "google.generativeai.protos.ListPermissionsRequest.to_dict": true, + "google.generativeai.protos.ListPermissionsRequest.to_json": true, + "google.generativeai.protos.ListPermissionsRequest.wrap": true, + "google.generativeai.protos.ListPermissionsResponse": false, + "google.generativeai.protos.ListPermissionsResponse.__call__": true, + "google.generativeai.protos.ListPermissionsResponse.__eq__": true, + "google.generativeai.protos.ListPermissionsResponse.__ge__": true, + "google.generativeai.protos.ListPermissionsResponse.__gt__": true, + "google.generativeai.protos.ListPermissionsResponse.__init__": true, + "google.generativeai.protos.ListPermissionsResponse.__le__": true, + "google.generativeai.protos.ListPermissionsResponse.__lt__": true, + "google.generativeai.protos.ListPermissionsResponse.__ne__": true, + "google.generativeai.protos.ListPermissionsResponse.__new__": true, + "google.generativeai.protos.ListPermissionsResponse.__or__": true, + "google.generativeai.protos.ListPermissionsResponse.__ror__": true, + "google.generativeai.protos.ListPermissionsResponse.copy_from": true, + "google.generativeai.protos.ListPermissionsResponse.deserialize": true, + "google.generativeai.protos.ListPermissionsResponse.from_json": true, + "google.generativeai.protos.ListPermissionsResponse.mro": true, + "google.generativeai.protos.ListPermissionsResponse.next_page_token": true, + "google.generativeai.protos.ListPermissionsResponse.pb": true, + "google.generativeai.protos.ListPermissionsResponse.permissions": true, + "google.generativeai.protos.ListPermissionsResponse.serialize": true, + "google.generativeai.protos.ListPermissionsResponse.to_dict": true, + 
"google.generativeai.protos.ListPermissionsResponse.to_json": true, + "google.generativeai.protos.ListPermissionsResponse.wrap": true, + "google.generativeai.protos.ListTunedModelsRequest": false, + "google.generativeai.protos.ListTunedModelsRequest.__call__": true, + "google.generativeai.protos.ListTunedModelsRequest.__eq__": true, + "google.generativeai.protos.ListTunedModelsRequest.__ge__": true, + "google.generativeai.protos.ListTunedModelsRequest.__gt__": true, + "google.generativeai.protos.ListTunedModelsRequest.__init__": true, + "google.generativeai.protos.ListTunedModelsRequest.__le__": true, + "google.generativeai.protos.ListTunedModelsRequest.__lt__": true, + "google.generativeai.protos.ListTunedModelsRequest.__ne__": true, + "google.generativeai.protos.ListTunedModelsRequest.__new__": true, + "google.generativeai.protos.ListTunedModelsRequest.__or__": true, + "google.generativeai.protos.ListTunedModelsRequest.__ror__": true, + "google.generativeai.protos.ListTunedModelsRequest.copy_from": true, + "google.generativeai.protos.ListTunedModelsRequest.deserialize": true, + "google.generativeai.protos.ListTunedModelsRequest.filter": true, + "google.generativeai.protos.ListTunedModelsRequest.from_json": true, + "google.generativeai.protos.ListTunedModelsRequest.mro": true, + "google.generativeai.protos.ListTunedModelsRequest.page_size": true, + "google.generativeai.protos.ListTunedModelsRequest.page_token": true, + "google.generativeai.protos.ListTunedModelsRequest.pb": true, + "google.generativeai.protos.ListTunedModelsRequest.serialize": true, + "google.generativeai.protos.ListTunedModelsRequest.to_dict": true, + "google.generativeai.protos.ListTunedModelsRequest.to_json": true, + "google.generativeai.protos.ListTunedModelsRequest.wrap": true, + "google.generativeai.protos.ListTunedModelsResponse": false, + "google.generativeai.protos.ListTunedModelsResponse.__call__": true, + "google.generativeai.protos.ListTunedModelsResponse.__eq__": true, + 
"google.generativeai.protos.ListTunedModelsResponse.__ge__": true, + "google.generativeai.protos.ListTunedModelsResponse.__gt__": true, + "google.generativeai.protos.ListTunedModelsResponse.__init__": true, + "google.generativeai.protos.ListTunedModelsResponse.__le__": true, + "google.generativeai.protos.ListTunedModelsResponse.__lt__": true, + "google.generativeai.protos.ListTunedModelsResponse.__ne__": true, + "google.generativeai.protos.ListTunedModelsResponse.__new__": true, + "google.generativeai.protos.ListTunedModelsResponse.__or__": true, + "google.generativeai.protos.ListTunedModelsResponse.__ror__": true, + "google.generativeai.protos.ListTunedModelsResponse.copy_from": true, + "google.generativeai.protos.ListTunedModelsResponse.deserialize": true, + "google.generativeai.protos.ListTunedModelsResponse.from_json": true, + "google.generativeai.protos.ListTunedModelsResponse.mro": true, + "google.generativeai.protos.ListTunedModelsResponse.next_page_token": true, + "google.generativeai.protos.ListTunedModelsResponse.pb": true, + "google.generativeai.protos.ListTunedModelsResponse.serialize": true, + "google.generativeai.protos.ListTunedModelsResponse.to_dict": true, + "google.generativeai.protos.ListTunedModelsResponse.to_json": true, + "google.generativeai.protos.ListTunedModelsResponse.tuned_models": true, + "google.generativeai.protos.ListTunedModelsResponse.wrap": true, + "google.generativeai.protos.Message": false, + "google.generativeai.protos.Message.__call__": true, + "google.generativeai.protos.Message.__eq__": true, + "google.generativeai.protos.Message.__ge__": true, + "google.generativeai.protos.Message.__gt__": true, + "google.generativeai.protos.Message.__init__": true, + "google.generativeai.protos.Message.__le__": true, + "google.generativeai.protos.Message.__lt__": true, + "google.generativeai.protos.Message.__ne__": true, + "google.generativeai.protos.Message.__new__": true, + "google.generativeai.protos.Message.__or__": true, + 
"google.generativeai.protos.Message.__ror__": true, + "google.generativeai.protos.Message.author": true, + "google.generativeai.protos.Message.citation_metadata": true, + "google.generativeai.protos.Message.content": true, + "google.generativeai.protos.Message.copy_from": true, + "google.generativeai.protos.Message.deserialize": true, + "google.generativeai.protos.Message.from_json": true, + "google.generativeai.protos.Message.mro": true, + "google.generativeai.protos.Message.pb": true, + "google.generativeai.protos.Message.serialize": true, + "google.generativeai.protos.Message.to_dict": true, + "google.generativeai.protos.Message.to_json": true, + "google.generativeai.protos.Message.wrap": true, + "google.generativeai.protos.MessagePrompt": false, + "google.generativeai.protos.MessagePrompt.__call__": true, + "google.generativeai.protos.MessagePrompt.__eq__": true, + "google.generativeai.protos.MessagePrompt.__ge__": true, + "google.generativeai.protos.MessagePrompt.__gt__": true, + "google.generativeai.protos.MessagePrompt.__init__": true, + "google.generativeai.protos.MessagePrompt.__le__": true, + "google.generativeai.protos.MessagePrompt.__lt__": true, + "google.generativeai.protos.MessagePrompt.__ne__": true, + "google.generativeai.protos.MessagePrompt.__new__": true, + "google.generativeai.protos.MessagePrompt.__or__": true, + "google.generativeai.protos.MessagePrompt.__ror__": true, + "google.generativeai.protos.MessagePrompt.context": true, + "google.generativeai.protos.MessagePrompt.copy_from": true, + "google.generativeai.protos.MessagePrompt.deserialize": true, + "google.generativeai.protos.MessagePrompt.examples": true, + "google.generativeai.protos.MessagePrompt.from_json": true, + "google.generativeai.protos.MessagePrompt.messages": true, + "google.generativeai.protos.MessagePrompt.mro": true, + "google.generativeai.protos.MessagePrompt.pb": true, + "google.generativeai.protos.MessagePrompt.serialize": true, + 
"google.generativeai.protos.MessagePrompt.to_dict": true, + "google.generativeai.protos.MessagePrompt.to_json": true, + "google.generativeai.protos.MessagePrompt.wrap": true, + "google.generativeai.protos.MetadataFilter": false, + "google.generativeai.protos.MetadataFilter.__call__": true, + "google.generativeai.protos.MetadataFilter.__eq__": true, + "google.generativeai.protos.MetadataFilter.__ge__": true, + "google.generativeai.protos.MetadataFilter.__gt__": true, + "google.generativeai.protos.MetadataFilter.__init__": true, + "google.generativeai.protos.MetadataFilter.__le__": true, + "google.generativeai.protos.MetadataFilter.__lt__": true, + "google.generativeai.protos.MetadataFilter.__ne__": true, + "google.generativeai.protos.MetadataFilter.__new__": true, + "google.generativeai.protos.MetadataFilter.__or__": true, + "google.generativeai.protos.MetadataFilter.__ror__": true, + "google.generativeai.protos.MetadataFilter.conditions": true, + "google.generativeai.protos.MetadataFilter.copy_from": true, + "google.generativeai.protos.MetadataFilter.deserialize": true, + "google.generativeai.protos.MetadataFilter.from_json": true, + "google.generativeai.protos.MetadataFilter.key": true, + "google.generativeai.protos.MetadataFilter.mro": true, + "google.generativeai.protos.MetadataFilter.pb": true, + "google.generativeai.protos.MetadataFilter.serialize": true, + "google.generativeai.protos.MetadataFilter.to_dict": true, + "google.generativeai.protos.MetadataFilter.to_json": true, + "google.generativeai.protos.MetadataFilter.wrap": true, + "google.generativeai.protos.Model": false, + "google.generativeai.protos.Model.__call__": true, + "google.generativeai.protos.Model.__eq__": true, + "google.generativeai.protos.Model.__ge__": true, + "google.generativeai.protos.Model.__gt__": true, + "google.generativeai.protos.Model.__init__": true, + "google.generativeai.protos.Model.__le__": true, + "google.generativeai.protos.Model.__lt__": true, + 
"google.generativeai.protos.Model.__ne__": true, + "google.generativeai.protos.Model.__new__": true, + "google.generativeai.protos.Model.__or__": true, + "google.generativeai.protos.Model.__ror__": true, + "google.generativeai.protos.Model.base_model_id": true, + "google.generativeai.protos.Model.copy_from": true, + "google.generativeai.protos.Model.description": true, + "google.generativeai.protos.Model.deserialize": true, + "google.generativeai.protos.Model.display_name": true, + "google.generativeai.protos.Model.from_json": true, + "google.generativeai.protos.Model.input_token_limit": true, + "google.generativeai.protos.Model.max_temperature": true, + "google.generativeai.protos.Model.mro": true, + "google.generativeai.protos.Model.name": true, + "google.generativeai.protos.Model.output_token_limit": true, + "google.generativeai.protos.Model.pb": true, + "google.generativeai.protos.Model.serialize": true, + "google.generativeai.protos.Model.supported_generation_methods": true, + "google.generativeai.protos.Model.temperature": true, + "google.generativeai.protos.Model.to_dict": true, + "google.generativeai.protos.Model.to_json": true, + "google.generativeai.protos.Model.top_k": true, + "google.generativeai.protos.Model.top_p": true, + "google.generativeai.protos.Model.version": true, + "google.generativeai.protos.Model.wrap": true, + "google.generativeai.protos.Part": false, + "google.generativeai.protos.Part.__call__": true, + "google.generativeai.protos.Part.__eq__": true, + "google.generativeai.protos.Part.__ge__": true, + "google.generativeai.protos.Part.__gt__": true, + "google.generativeai.protos.Part.__init__": true, + "google.generativeai.protos.Part.__le__": true, + "google.generativeai.protos.Part.__lt__": true, + "google.generativeai.protos.Part.__ne__": true, + "google.generativeai.protos.Part.__new__": true, + "google.generativeai.protos.Part.__or__": true, + "google.generativeai.protos.Part.__ror__": true, + 
"google.generativeai.protos.Part.code_execution_result": true, + "google.generativeai.protos.Part.copy_from": true, + "google.generativeai.protos.Part.deserialize": true, + "google.generativeai.protos.Part.executable_code": true, + "google.generativeai.protos.Part.file_data": true, + "google.generativeai.protos.Part.from_json": true, + "google.generativeai.protos.Part.function_call": true, + "google.generativeai.protos.Part.function_response": true, + "google.generativeai.protos.Part.inline_data": true, + "google.generativeai.protos.Part.mro": true, + "google.generativeai.protos.Part.pb": true, + "google.generativeai.protos.Part.serialize": true, + "google.generativeai.protos.Part.text": true, + "google.generativeai.protos.Part.to_dict": true, + "google.generativeai.protos.Part.to_json": true, + "google.generativeai.protos.Part.wrap": true, + "google.generativeai.protos.Permission": false, + "google.generativeai.protos.Permission.GranteeType": false, + "google.generativeai.protos.Permission.GranteeType.EVERYONE": true, + "google.generativeai.protos.Permission.GranteeType.GRANTEE_TYPE_UNSPECIFIED": true, + "google.generativeai.protos.Permission.GranteeType.GROUP": true, + "google.generativeai.protos.Permission.GranteeType.USER": true, + "google.generativeai.protos.Permission.GranteeType.__abs__": true, + "google.generativeai.protos.Permission.GranteeType.__add__": true, + "google.generativeai.protos.Permission.GranteeType.__and__": true, + "google.generativeai.protos.Permission.GranteeType.__bool__": true, + "google.generativeai.protos.Permission.GranteeType.__contains__": true, + "google.generativeai.protos.Permission.GranteeType.__eq__": true, + "google.generativeai.protos.Permission.GranteeType.__floordiv__": true, + "google.generativeai.protos.Permission.GranteeType.__ge__": true, + "google.generativeai.protos.Permission.GranteeType.__getitem__": true, + "google.generativeai.protos.Permission.GranteeType.__gt__": true, + 
"google.generativeai.protos.Permission.GranteeType.__init__": true, + "google.generativeai.protos.Permission.GranteeType.__invert__": true, + "google.generativeai.protos.Permission.GranteeType.__iter__": true, + "google.generativeai.protos.Permission.GranteeType.__le__": true, + "google.generativeai.protos.Permission.GranteeType.__len__": true, + "google.generativeai.protos.Permission.GranteeType.__lshift__": true, + "google.generativeai.protos.Permission.GranteeType.__lt__": true, + "google.generativeai.protos.Permission.GranteeType.__mod__": true, + "google.generativeai.protos.Permission.GranteeType.__mul__": true, + "google.generativeai.protos.Permission.GranteeType.__ne__": true, + "google.generativeai.protos.Permission.GranteeType.__neg__": true, + "google.generativeai.protos.Permission.GranteeType.__new__": true, + "google.generativeai.protos.Permission.GranteeType.__or__": true, + "google.generativeai.protos.Permission.GranteeType.__pos__": true, + "google.generativeai.protos.Permission.GranteeType.__pow__": true, + "google.generativeai.protos.Permission.GranteeType.__radd__": true, + "google.generativeai.protos.Permission.GranteeType.__rand__": true, + "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": true, + "google.generativeai.protos.Permission.GranteeType.__rlshift__": true, + "google.generativeai.protos.Permission.GranteeType.__rmod__": true, + "google.generativeai.protos.Permission.GranteeType.__rmul__": true, + "google.generativeai.protos.Permission.GranteeType.__ror__": true, + "google.generativeai.protos.Permission.GranteeType.__rpow__": true, + "google.generativeai.protos.Permission.GranteeType.__rrshift__": true, + "google.generativeai.protos.Permission.GranteeType.__rshift__": true, + "google.generativeai.protos.Permission.GranteeType.__rsub__": true, + "google.generativeai.protos.Permission.GranteeType.__rtruediv__": true, + "google.generativeai.protos.Permission.GranteeType.__rxor__": true, + 
"google.generativeai.protos.Permission.GranteeType.__sub__": true, + "google.generativeai.protos.Permission.GranteeType.__truediv__": true, + "google.generativeai.protos.Permission.GranteeType.__xor__": true, + "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": true, + "google.generativeai.protos.Permission.GranteeType.bit_count": true, + "google.generativeai.protos.Permission.GranteeType.bit_length": true, + "google.generativeai.protos.Permission.GranteeType.conjugate": true, + "google.generativeai.protos.Permission.GranteeType.denominator": true, + "google.generativeai.protos.Permission.GranteeType.from_bytes": true, + "google.generativeai.protos.Permission.GranteeType.imag": true, + "google.generativeai.protos.Permission.GranteeType.numerator": true, + "google.generativeai.protos.Permission.GranteeType.real": true, + "google.generativeai.protos.Permission.GranteeType.to_bytes": true, + "google.generativeai.protos.Permission.Role": false, + "google.generativeai.protos.Permission.Role.OWNER": true, + "google.generativeai.protos.Permission.Role.READER": true, + "google.generativeai.protos.Permission.Role.ROLE_UNSPECIFIED": true, + "google.generativeai.protos.Permission.Role.WRITER": true, + "google.generativeai.protos.Permission.Role.__abs__": true, + "google.generativeai.protos.Permission.Role.__add__": true, + "google.generativeai.protos.Permission.Role.__and__": true, + "google.generativeai.protos.Permission.Role.__bool__": true, + "google.generativeai.protos.Permission.Role.__contains__": true, + "google.generativeai.protos.Permission.Role.__eq__": true, + "google.generativeai.protos.Permission.Role.__floordiv__": true, + "google.generativeai.protos.Permission.Role.__ge__": true, + "google.generativeai.protos.Permission.Role.__getitem__": true, + "google.generativeai.protos.Permission.Role.__gt__": true, + "google.generativeai.protos.Permission.Role.__init__": true, + "google.generativeai.protos.Permission.Role.__invert__": true, + 
"google.generativeai.protos.Permission.Role.__iter__": true, + "google.generativeai.protos.Permission.Role.__le__": true, + "google.generativeai.protos.Permission.Role.__len__": true, + "google.generativeai.protos.Permission.Role.__lshift__": true, + "google.generativeai.protos.Permission.Role.__lt__": true, + "google.generativeai.protos.Permission.Role.__mod__": true, + "google.generativeai.protos.Permission.Role.__mul__": true, + "google.generativeai.protos.Permission.Role.__ne__": true, + "google.generativeai.protos.Permission.Role.__neg__": true, + "google.generativeai.protos.Permission.Role.__new__": true, + "google.generativeai.protos.Permission.Role.__or__": true, + "google.generativeai.protos.Permission.Role.__pos__": true, + "google.generativeai.protos.Permission.Role.__pow__": true, + "google.generativeai.protos.Permission.Role.__radd__": true, + "google.generativeai.protos.Permission.Role.__rand__": true, + "google.generativeai.protos.Permission.Role.__rfloordiv__": true, + "google.generativeai.protos.Permission.Role.__rlshift__": true, + "google.generativeai.protos.Permission.Role.__rmod__": true, + "google.generativeai.protos.Permission.Role.__rmul__": true, + "google.generativeai.protos.Permission.Role.__ror__": true, + "google.generativeai.protos.Permission.Role.__rpow__": true, + "google.generativeai.protos.Permission.Role.__rrshift__": true, + "google.generativeai.protos.Permission.Role.__rshift__": true, + "google.generativeai.protos.Permission.Role.__rsub__": true, + "google.generativeai.protos.Permission.Role.__rtruediv__": true, + "google.generativeai.protos.Permission.Role.__rxor__": true, + "google.generativeai.protos.Permission.Role.__sub__": true, + "google.generativeai.protos.Permission.Role.__truediv__": true, + "google.generativeai.protos.Permission.Role.__xor__": true, + "google.generativeai.protos.Permission.Role.as_integer_ratio": true, + "google.generativeai.protos.Permission.Role.bit_count": true, + 
"google.generativeai.protos.Permission.Role.bit_length": true, + "google.generativeai.protos.Permission.Role.conjugate": true, + "google.generativeai.protos.Permission.Role.denominator": true, + "google.generativeai.protos.Permission.Role.from_bytes": true, + "google.generativeai.protos.Permission.Role.imag": true, + "google.generativeai.protos.Permission.Role.numerator": true, + "google.generativeai.protos.Permission.Role.real": true, + "google.generativeai.protos.Permission.Role.to_bytes": true, + "google.generativeai.protos.Permission.__call__": true, + "google.generativeai.protos.Permission.__eq__": true, + "google.generativeai.protos.Permission.__ge__": true, + "google.generativeai.protos.Permission.__gt__": true, + "google.generativeai.protos.Permission.__init__": true, + "google.generativeai.protos.Permission.__le__": true, + "google.generativeai.protos.Permission.__lt__": true, + "google.generativeai.protos.Permission.__ne__": true, + "google.generativeai.protos.Permission.__new__": true, + "google.generativeai.protos.Permission.__or__": true, + "google.generativeai.protos.Permission.__ror__": true, + "google.generativeai.protos.Permission.copy_from": true, + "google.generativeai.protos.Permission.deserialize": true, + "google.generativeai.protos.Permission.email_address": true, + "google.generativeai.protos.Permission.from_json": true, + "google.generativeai.protos.Permission.grantee_type": true, + "google.generativeai.protos.Permission.mro": true, + "google.generativeai.protos.Permission.name": true, + "google.generativeai.protos.Permission.pb": true, + "google.generativeai.protos.Permission.role": true, + "google.generativeai.protos.Permission.serialize": true, + "google.generativeai.protos.Permission.to_dict": true, + "google.generativeai.protos.Permission.to_json": true, + "google.generativeai.protos.Permission.wrap": true, + "google.generativeai.protos.QueryCorpusRequest": false, + "google.generativeai.protos.QueryCorpusRequest.__call__": true, + 
"google.generativeai.protos.QueryCorpusRequest.__eq__": true, + "google.generativeai.protos.QueryCorpusRequest.__ge__": true, + "google.generativeai.protos.QueryCorpusRequest.__gt__": true, + "google.generativeai.protos.QueryCorpusRequest.__init__": true, + "google.generativeai.protos.QueryCorpusRequest.__le__": true, + "google.generativeai.protos.QueryCorpusRequest.__lt__": true, + "google.generativeai.protos.QueryCorpusRequest.__ne__": true, + "google.generativeai.protos.QueryCorpusRequest.__new__": true, + "google.generativeai.protos.QueryCorpusRequest.__or__": true, + "google.generativeai.protos.QueryCorpusRequest.__ror__": true, + "google.generativeai.protos.QueryCorpusRequest.copy_from": true, + "google.generativeai.protos.QueryCorpusRequest.deserialize": true, + "google.generativeai.protos.QueryCorpusRequest.from_json": true, + "google.generativeai.protos.QueryCorpusRequest.metadata_filters": true, + "google.generativeai.protos.QueryCorpusRequest.mro": true, + "google.generativeai.protos.QueryCorpusRequest.name": true, + "google.generativeai.protos.QueryCorpusRequest.pb": true, + "google.generativeai.protos.QueryCorpusRequest.query": true, + "google.generativeai.protos.QueryCorpusRequest.results_count": true, + "google.generativeai.protos.QueryCorpusRequest.serialize": true, + "google.generativeai.protos.QueryCorpusRequest.to_dict": true, + "google.generativeai.protos.QueryCorpusRequest.to_json": true, + "google.generativeai.protos.QueryCorpusRequest.wrap": true, + "google.generativeai.protos.QueryCorpusResponse": false, + "google.generativeai.protos.QueryCorpusResponse.__call__": true, + "google.generativeai.protos.QueryCorpusResponse.__eq__": true, + "google.generativeai.protos.QueryCorpusResponse.__ge__": true, + "google.generativeai.protos.QueryCorpusResponse.__gt__": true, + "google.generativeai.protos.QueryCorpusResponse.__init__": true, + "google.generativeai.protos.QueryCorpusResponse.__le__": true, + 
"google.generativeai.protos.QueryCorpusResponse.__lt__": true, + "google.generativeai.protos.QueryCorpusResponse.__ne__": true, + "google.generativeai.protos.QueryCorpusResponse.__new__": true, + "google.generativeai.protos.QueryCorpusResponse.__or__": true, + "google.generativeai.protos.QueryCorpusResponse.__ror__": true, + "google.generativeai.protos.QueryCorpusResponse.copy_from": true, + "google.generativeai.protos.QueryCorpusResponse.deserialize": true, + "google.generativeai.protos.QueryCorpusResponse.from_json": true, + "google.generativeai.protos.QueryCorpusResponse.mro": true, + "google.generativeai.protos.QueryCorpusResponse.pb": true, + "google.generativeai.protos.QueryCorpusResponse.relevant_chunks": true, + "google.generativeai.protos.QueryCorpusResponse.serialize": true, + "google.generativeai.protos.QueryCorpusResponse.to_dict": true, + "google.generativeai.protos.QueryCorpusResponse.to_json": true, + "google.generativeai.protos.QueryCorpusResponse.wrap": true, + "google.generativeai.protos.QueryDocumentRequest": false, + "google.generativeai.protos.QueryDocumentRequest.__call__": true, + "google.generativeai.protos.QueryDocumentRequest.__eq__": true, + "google.generativeai.protos.QueryDocumentRequest.__ge__": true, + "google.generativeai.protos.QueryDocumentRequest.__gt__": true, + "google.generativeai.protos.QueryDocumentRequest.__init__": true, + "google.generativeai.protos.QueryDocumentRequest.__le__": true, + "google.generativeai.protos.QueryDocumentRequest.__lt__": true, + "google.generativeai.protos.QueryDocumentRequest.__ne__": true, + "google.generativeai.protos.QueryDocumentRequest.__new__": true, + "google.generativeai.protos.QueryDocumentRequest.__or__": true, + "google.generativeai.protos.QueryDocumentRequest.__ror__": true, + "google.generativeai.protos.QueryDocumentRequest.copy_from": true, + "google.generativeai.protos.QueryDocumentRequest.deserialize": true, + "google.generativeai.protos.QueryDocumentRequest.from_json": true, + 
"google.generativeai.protos.QueryDocumentRequest.metadata_filters": true, + "google.generativeai.protos.QueryDocumentRequest.mro": true, + "google.generativeai.protos.QueryDocumentRequest.name": true, + "google.generativeai.protos.QueryDocumentRequest.pb": true, + "google.generativeai.protos.QueryDocumentRequest.query": true, + "google.generativeai.protos.QueryDocumentRequest.results_count": true, + "google.generativeai.protos.QueryDocumentRequest.serialize": true, + "google.generativeai.protos.QueryDocumentRequest.to_dict": true, + "google.generativeai.protos.QueryDocumentRequest.to_json": true, + "google.generativeai.protos.QueryDocumentRequest.wrap": true, + "google.generativeai.protos.QueryDocumentResponse": false, + "google.generativeai.protos.QueryDocumentResponse.__call__": true, + "google.generativeai.protos.QueryDocumentResponse.__eq__": true, + "google.generativeai.protos.QueryDocumentResponse.__ge__": true, + "google.generativeai.protos.QueryDocumentResponse.__gt__": true, + "google.generativeai.protos.QueryDocumentResponse.__init__": true, + "google.generativeai.protos.QueryDocumentResponse.__le__": true, + "google.generativeai.protos.QueryDocumentResponse.__lt__": true, + "google.generativeai.protos.QueryDocumentResponse.__ne__": true, + "google.generativeai.protos.QueryDocumentResponse.__new__": true, + "google.generativeai.protos.QueryDocumentResponse.__or__": true, + "google.generativeai.protos.QueryDocumentResponse.__ror__": true, + "google.generativeai.protos.QueryDocumentResponse.copy_from": true, + "google.generativeai.protos.QueryDocumentResponse.deserialize": true, + "google.generativeai.protos.QueryDocumentResponse.from_json": true, + "google.generativeai.protos.QueryDocumentResponse.mro": true, + "google.generativeai.protos.QueryDocumentResponse.pb": true, + "google.generativeai.protos.QueryDocumentResponse.relevant_chunks": true, + "google.generativeai.protos.QueryDocumentResponse.serialize": true, + 
"google.generativeai.protos.QueryDocumentResponse.to_dict": true, + "google.generativeai.protos.QueryDocumentResponse.to_json": true, + "google.generativeai.protos.QueryDocumentResponse.wrap": true, + "google.generativeai.protos.RelevantChunk": false, + "google.generativeai.protos.RelevantChunk.__call__": true, + "google.generativeai.protos.RelevantChunk.__eq__": true, + "google.generativeai.protos.RelevantChunk.__ge__": true, + "google.generativeai.protos.RelevantChunk.__gt__": true, + "google.generativeai.protos.RelevantChunk.__init__": true, + "google.generativeai.protos.RelevantChunk.__le__": true, + "google.generativeai.protos.RelevantChunk.__lt__": true, + "google.generativeai.protos.RelevantChunk.__ne__": true, + "google.generativeai.protos.RelevantChunk.__new__": true, + "google.generativeai.protos.RelevantChunk.__or__": true, + "google.generativeai.protos.RelevantChunk.__ror__": true, + "google.generativeai.protos.RelevantChunk.chunk": true, + "google.generativeai.protos.RelevantChunk.chunk_relevance_score": true, + "google.generativeai.protos.RelevantChunk.copy_from": true, + "google.generativeai.protos.RelevantChunk.deserialize": true, + "google.generativeai.protos.RelevantChunk.from_json": true, + "google.generativeai.protos.RelevantChunk.mro": true, + "google.generativeai.protos.RelevantChunk.pb": true, + "google.generativeai.protos.RelevantChunk.serialize": true, + "google.generativeai.protos.RelevantChunk.to_dict": true, + "google.generativeai.protos.RelevantChunk.to_json": true, + "google.generativeai.protos.RelevantChunk.wrap": true, + "google.generativeai.protos.SafetyFeedback": false, + "google.generativeai.protos.SafetyFeedback.__call__": true, + "google.generativeai.protos.SafetyFeedback.__eq__": true, + "google.generativeai.protos.SafetyFeedback.__ge__": true, + "google.generativeai.protos.SafetyFeedback.__gt__": true, + "google.generativeai.protos.SafetyFeedback.__init__": true, + "google.generativeai.protos.SafetyFeedback.__le__": true, + 
"google.generativeai.protos.SafetyFeedback.__lt__": true, + "google.generativeai.protos.SafetyFeedback.__ne__": true, + "google.generativeai.protos.SafetyFeedback.__new__": true, + "google.generativeai.protos.SafetyFeedback.__or__": true, + "google.generativeai.protos.SafetyFeedback.__ror__": true, + "google.generativeai.protos.SafetyFeedback.copy_from": true, + "google.generativeai.protos.SafetyFeedback.deserialize": true, + "google.generativeai.protos.SafetyFeedback.from_json": true, + "google.generativeai.protos.SafetyFeedback.mro": true, + "google.generativeai.protos.SafetyFeedback.pb": true, + "google.generativeai.protos.SafetyFeedback.rating": true, + "google.generativeai.protos.SafetyFeedback.serialize": true, + "google.generativeai.protos.SafetyFeedback.setting": true, + "google.generativeai.protos.SafetyFeedback.to_dict": true, + "google.generativeai.protos.SafetyFeedback.to_json": true, + "google.generativeai.protos.SafetyFeedback.wrap": true, + "google.generativeai.protos.SafetyRating": false, + "google.generativeai.protos.SafetyRating.HarmProbability": false, + "google.generativeai.protos.SafetyRating.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true, + "google.generativeai.protos.SafetyRating.HarmProbability.HIGH": true, + "google.generativeai.protos.SafetyRating.HarmProbability.LOW": true, + "google.generativeai.protos.SafetyRating.HarmProbability.MEDIUM": true, + "google.generativeai.protos.SafetyRating.HarmProbability.NEGLIGIBLE": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__add__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__and__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__contains__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": true, + 
"google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__init__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__iter__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__le__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__len__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__new__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__or__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": true, + 
"google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": true, + "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": true, + "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": true, + "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": true, + "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": true, + "google.generativeai.protos.SafetyRating.HarmProbability.denominator": true, + "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes": true, + "google.generativeai.protos.SafetyRating.HarmProbability.imag": true, + "google.generativeai.protos.SafetyRating.HarmProbability.numerator": true, + "google.generativeai.protos.SafetyRating.HarmProbability.real": true, + "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": true, + "google.generativeai.protos.SafetyRating.__call__": true, + "google.generativeai.protos.SafetyRating.__eq__": true, + "google.generativeai.protos.SafetyRating.__ge__": true, + "google.generativeai.protos.SafetyRating.__gt__": true, + "google.generativeai.protos.SafetyRating.__init__": true, + "google.generativeai.protos.SafetyRating.__le__": true, + "google.generativeai.protos.SafetyRating.__lt__": true, + "google.generativeai.protos.SafetyRating.__ne__": true, + "google.generativeai.protos.SafetyRating.__new__": true, + 
"google.generativeai.protos.SafetyRating.__or__": true, + "google.generativeai.protos.SafetyRating.__ror__": true, + "google.generativeai.protos.SafetyRating.blocked": true, + "google.generativeai.protos.SafetyRating.category": true, + "google.generativeai.protos.SafetyRating.copy_from": true, + "google.generativeai.protos.SafetyRating.deserialize": true, + "google.generativeai.protos.SafetyRating.from_json": true, + "google.generativeai.protos.SafetyRating.mro": true, + "google.generativeai.protos.SafetyRating.pb": true, + "google.generativeai.protos.SafetyRating.probability": true, + "google.generativeai.protos.SafetyRating.serialize": true, + "google.generativeai.protos.SafetyRating.to_dict": true, + "google.generativeai.protos.SafetyRating.to_json": true, + "google.generativeai.protos.SafetyRating.wrap": true, + "google.generativeai.protos.SafetySetting": false, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold": false, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_NONE": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": true, + 
"google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": true, + 
"google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": true, + "google.generativeai.protos.SafetySetting.__call__": true, + "google.generativeai.protos.SafetySetting.__eq__": true, + "google.generativeai.protos.SafetySetting.__ge__": true, + "google.generativeai.protos.SafetySetting.__gt__": true, + "google.generativeai.protos.SafetySetting.__init__": true, + "google.generativeai.protos.SafetySetting.__le__": true, + "google.generativeai.protos.SafetySetting.__lt__": true, + 
"google.generativeai.protos.SafetySetting.__ne__": true, + "google.generativeai.protos.SafetySetting.__new__": true, + "google.generativeai.protos.SafetySetting.__or__": true, + "google.generativeai.protos.SafetySetting.__ror__": true, + "google.generativeai.protos.SafetySetting.category": true, + "google.generativeai.protos.SafetySetting.copy_from": true, + "google.generativeai.protos.SafetySetting.deserialize": true, + "google.generativeai.protos.SafetySetting.from_json": true, + "google.generativeai.protos.SafetySetting.mro": true, + "google.generativeai.protos.SafetySetting.pb": true, + "google.generativeai.protos.SafetySetting.serialize": true, + "google.generativeai.protos.SafetySetting.threshold": true, + "google.generativeai.protos.SafetySetting.to_dict": true, + "google.generativeai.protos.SafetySetting.to_json": true, + "google.generativeai.protos.SafetySetting.wrap": true, + "google.generativeai.protos.Schema": false, + "google.generativeai.protos.Schema.PropertiesEntry": false, + "google.generativeai.protos.Schema.PropertiesEntry.__call__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__eq__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__ge__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__gt__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__init__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__le__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__lt__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__ne__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__new__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__or__": true, + "google.generativeai.protos.Schema.PropertiesEntry.__ror__": true, + "google.generativeai.protos.Schema.PropertiesEntry.copy_from": true, + "google.generativeai.protos.Schema.PropertiesEntry.deserialize": true, + "google.generativeai.protos.Schema.PropertiesEntry.from_json": true, + 
"google.generativeai.protos.Schema.PropertiesEntry.key": true, + "google.generativeai.protos.Schema.PropertiesEntry.mro": true, + "google.generativeai.protos.Schema.PropertiesEntry.pb": true, + "google.generativeai.protos.Schema.PropertiesEntry.serialize": true, + "google.generativeai.protos.Schema.PropertiesEntry.to_dict": true, + "google.generativeai.protos.Schema.PropertiesEntry.to_json": true, + "google.generativeai.protos.Schema.PropertiesEntry.value": true, + "google.generativeai.protos.Schema.PropertiesEntry.wrap": true, + "google.generativeai.protos.Schema.__call__": true, + "google.generativeai.protos.Schema.__eq__": true, + "google.generativeai.protos.Schema.__ge__": true, + "google.generativeai.protos.Schema.__gt__": true, + "google.generativeai.protos.Schema.__init__": true, + "google.generativeai.protos.Schema.__le__": true, + "google.generativeai.protos.Schema.__lt__": true, + "google.generativeai.protos.Schema.__ne__": true, + "google.generativeai.protos.Schema.__new__": true, + "google.generativeai.protos.Schema.__or__": true, + "google.generativeai.protos.Schema.__ror__": true, + "google.generativeai.protos.Schema.copy_from": true, + "google.generativeai.protos.Schema.description": true, + "google.generativeai.protos.Schema.deserialize": true, + "google.generativeai.protos.Schema.enum": true, + "google.generativeai.protos.Schema.format_": true, + "google.generativeai.protos.Schema.from_json": true, + "google.generativeai.protos.Schema.items": true, + "google.generativeai.protos.Schema.mro": true, + "google.generativeai.protos.Schema.nullable": true, + "google.generativeai.protos.Schema.pb": true, + "google.generativeai.protos.Schema.properties": true, + "google.generativeai.protos.Schema.required": true, + "google.generativeai.protos.Schema.serialize": true, + "google.generativeai.protos.Schema.to_dict": true, + "google.generativeai.protos.Schema.to_json": true, + "google.generativeai.protos.Schema.type_": true, + 
"google.generativeai.protos.Schema.wrap": true, + "google.generativeai.protos.SemanticRetrieverConfig": false, + "google.generativeai.protos.SemanticRetrieverConfig.__call__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__eq__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__ge__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__gt__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__init__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__le__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__lt__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__ne__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__new__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__or__": true, + "google.generativeai.protos.SemanticRetrieverConfig.__ror__": true, + "google.generativeai.protos.SemanticRetrieverConfig.copy_from": true, + "google.generativeai.protos.SemanticRetrieverConfig.deserialize": true, + "google.generativeai.protos.SemanticRetrieverConfig.from_json": true, + "google.generativeai.protos.SemanticRetrieverConfig.max_chunks_count": true, + "google.generativeai.protos.SemanticRetrieverConfig.metadata_filters": true, + "google.generativeai.protos.SemanticRetrieverConfig.minimum_relevance_score": true, + "google.generativeai.protos.SemanticRetrieverConfig.mro": true, + "google.generativeai.protos.SemanticRetrieverConfig.pb": true, + "google.generativeai.protos.SemanticRetrieverConfig.query": true, + "google.generativeai.protos.SemanticRetrieverConfig.serialize": true, + "google.generativeai.protos.SemanticRetrieverConfig.source": true, + "google.generativeai.protos.SemanticRetrieverConfig.to_dict": true, + "google.generativeai.protos.SemanticRetrieverConfig.to_json": true, + "google.generativeai.protos.SemanticRetrieverConfig.wrap": true, + "google.generativeai.protos.StringList": false, + "google.generativeai.protos.StringList.__call__": true, + 
"google.generativeai.protos.StringList.__eq__": true, + "google.generativeai.protos.StringList.__ge__": true, + "google.generativeai.protos.StringList.__gt__": true, + "google.generativeai.protos.StringList.__init__": true, + "google.generativeai.protos.StringList.__le__": true, + "google.generativeai.protos.StringList.__lt__": true, + "google.generativeai.protos.StringList.__ne__": true, + "google.generativeai.protos.StringList.__new__": true, + "google.generativeai.protos.StringList.__or__": true, + "google.generativeai.protos.StringList.__ror__": true, + "google.generativeai.protos.StringList.copy_from": true, + "google.generativeai.protos.StringList.deserialize": true, + "google.generativeai.protos.StringList.from_json": true, + "google.generativeai.protos.StringList.mro": true, + "google.generativeai.protos.StringList.pb": true, + "google.generativeai.protos.StringList.serialize": true, + "google.generativeai.protos.StringList.to_dict": true, + "google.generativeai.protos.StringList.to_json": true, + "google.generativeai.protos.StringList.values": true, + "google.generativeai.protos.StringList.wrap": true, + "google.generativeai.protos.TaskType": false, + "google.generativeai.protos.TaskType.CLASSIFICATION": true, + "google.generativeai.protos.TaskType.CLUSTERING": true, + "google.generativeai.protos.TaskType.FACT_VERIFICATION": true, + "google.generativeai.protos.TaskType.QUESTION_ANSWERING": true, + "google.generativeai.protos.TaskType.RETRIEVAL_DOCUMENT": true, + "google.generativeai.protos.TaskType.RETRIEVAL_QUERY": true, + "google.generativeai.protos.TaskType.SEMANTIC_SIMILARITY": true, + "google.generativeai.protos.TaskType.TASK_TYPE_UNSPECIFIED": true, + "google.generativeai.protos.TaskType.__abs__": true, + "google.generativeai.protos.TaskType.__add__": true, + "google.generativeai.protos.TaskType.__and__": true, + "google.generativeai.protos.TaskType.__bool__": true, + "google.generativeai.protos.TaskType.__contains__": true, + 
"google.generativeai.protos.TaskType.__eq__": true, + "google.generativeai.protos.TaskType.__floordiv__": true, + "google.generativeai.protos.TaskType.__ge__": true, + "google.generativeai.protos.TaskType.__getitem__": true, + "google.generativeai.protos.TaskType.__gt__": true, + "google.generativeai.protos.TaskType.__init__": true, + "google.generativeai.protos.TaskType.__invert__": true, + "google.generativeai.protos.TaskType.__iter__": true, + "google.generativeai.protos.TaskType.__le__": true, + "google.generativeai.protos.TaskType.__len__": true, + "google.generativeai.protos.TaskType.__lshift__": true, + "google.generativeai.protos.TaskType.__lt__": true, + "google.generativeai.protos.TaskType.__mod__": true, + "google.generativeai.protos.TaskType.__mul__": true, + "google.generativeai.protos.TaskType.__ne__": true, + "google.generativeai.protos.TaskType.__neg__": true, + "google.generativeai.protos.TaskType.__new__": true, + "google.generativeai.protos.TaskType.__or__": true, + "google.generativeai.protos.TaskType.__pos__": true, + "google.generativeai.protos.TaskType.__pow__": true, + "google.generativeai.protos.TaskType.__radd__": true, + "google.generativeai.protos.TaskType.__rand__": true, + "google.generativeai.protos.TaskType.__rfloordiv__": true, + "google.generativeai.protos.TaskType.__rlshift__": true, + "google.generativeai.protos.TaskType.__rmod__": true, + "google.generativeai.protos.TaskType.__rmul__": true, + "google.generativeai.protos.TaskType.__ror__": true, + "google.generativeai.protos.TaskType.__rpow__": true, + "google.generativeai.protos.TaskType.__rrshift__": true, + "google.generativeai.protos.TaskType.__rshift__": true, + "google.generativeai.protos.TaskType.__rsub__": true, + "google.generativeai.protos.TaskType.__rtruediv__": true, + "google.generativeai.protos.TaskType.__rxor__": true, + "google.generativeai.protos.TaskType.__sub__": true, + "google.generativeai.protos.TaskType.__truediv__": true, + 
"google.generativeai.protos.TaskType.__xor__": true, + "google.generativeai.protos.TaskType.as_integer_ratio": true, + "google.generativeai.protos.TaskType.bit_count": true, + "google.generativeai.protos.TaskType.bit_length": true, + "google.generativeai.protos.TaskType.conjugate": true, + "google.generativeai.protos.TaskType.denominator": true, + "google.generativeai.protos.TaskType.from_bytes": true, + "google.generativeai.protos.TaskType.imag": true, + "google.generativeai.protos.TaskType.numerator": true, + "google.generativeai.protos.TaskType.real": true, + "google.generativeai.protos.TaskType.to_bytes": true, + "google.generativeai.protos.TextCompletion": false, + "google.generativeai.protos.TextCompletion.__call__": true, + "google.generativeai.protos.TextCompletion.__eq__": true, + "google.generativeai.protos.TextCompletion.__ge__": true, + "google.generativeai.protos.TextCompletion.__gt__": true, + "google.generativeai.protos.TextCompletion.__init__": true, + "google.generativeai.protos.TextCompletion.__le__": true, + "google.generativeai.protos.TextCompletion.__lt__": true, + "google.generativeai.protos.TextCompletion.__ne__": true, + "google.generativeai.protos.TextCompletion.__new__": true, + "google.generativeai.protos.TextCompletion.__or__": true, + "google.generativeai.protos.TextCompletion.__ror__": true, + "google.generativeai.protos.TextCompletion.citation_metadata": true, + "google.generativeai.protos.TextCompletion.copy_from": true, + "google.generativeai.protos.TextCompletion.deserialize": true, + "google.generativeai.protos.TextCompletion.from_json": true, + "google.generativeai.protos.TextCompletion.mro": true, + "google.generativeai.protos.TextCompletion.output": true, + "google.generativeai.protos.TextCompletion.pb": true, + "google.generativeai.protos.TextCompletion.safety_ratings": true, + "google.generativeai.protos.TextCompletion.serialize": true, + "google.generativeai.protos.TextCompletion.to_dict": true, + 
"google.generativeai.protos.TextCompletion.to_json": true, + "google.generativeai.protos.TextCompletion.wrap": true, + "google.generativeai.protos.TextPrompt": false, + "google.generativeai.protos.TextPrompt.__call__": true, + "google.generativeai.protos.TextPrompt.__eq__": true, + "google.generativeai.protos.TextPrompt.__ge__": true, + "google.generativeai.protos.TextPrompt.__gt__": true, + "google.generativeai.protos.TextPrompt.__init__": true, + "google.generativeai.protos.TextPrompt.__le__": true, + "google.generativeai.protos.TextPrompt.__lt__": true, + "google.generativeai.protos.TextPrompt.__ne__": true, + "google.generativeai.protos.TextPrompt.__new__": true, + "google.generativeai.protos.TextPrompt.__or__": true, + "google.generativeai.protos.TextPrompt.__ror__": true, + "google.generativeai.protos.TextPrompt.copy_from": true, + "google.generativeai.protos.TextPrompt.deserialize": true, + "google.generativeai.protos.TextPrompt.from_json": true, + "google.generativeai.protos.TextPrompt.mro": true, + "google.generativeai.protos.TextPrompt.pb": true, + "google.generativeai.protos.TextPrompt.serialize": true, + "google.generativeai.protos.TextPrompt.text": true, + "google.generativeai.protos.TextPrompt.to_dict": true, + "google.generativeai.protos.TextPrompt.to_json": true, + "google.generativeai.protos.TextPrompt.wrap": true, + "google.generativeai.protos.Tool": false, + "google.generativeai.protos.Tool.__call__": true, + "google.generativeai.protos.Tool.__eq__": true, + "google.generativeai.protos.Tool.__ge__": true, + "google.generativeai.protos.Tool.__gt__": true, + "google.generativeai.protos.Tool.__init__": true, + "google.generativeai.protos.Tool.__le__": true, + "google.generativeai.protos.Tool.__lt__": true, + "google.generativeai.protos.Tool.__ne__": true, + "google.generativeai.protos.Tool.__new__": true, + "google.generativeai.protos.Tool.__or__": true, + "google.generativeai.protos.Tool.__ror__": true, + 
"google.generativeai.protos.Tool.code_execution": true, + "google.generativeai.protos.Tool.copy_from": true, + "google.generativeai.protos.Tool.deserialize": true, + "google.generativeai.protos.Tool.from_json": true, + "google.generativeai.protos.Tool.function_declarations": true, + "google.generativeai.protos.Tool.mro": true, + "google.generativeai.protos.Tool.pb": true, + "google.generativeai.protos.Tool.serialize": true, + "google.generativeai.protos.Tool.to_dict": true, + "google.generativeai.protos.Tool.to_json": true, + "google.generativeai.protos.Tool.wrap": true, + "google.generativeai.protos.ToolConfig": false, + "google.generativeai.protos.ToolConfig.__call__": true, + "google.generativeai.protos.ToolConfig.__eq__": true, + "google.generativeai.protos.ToolConfig.__ge__": true, + "google.generativeai.protos.ToolConfig.__gt__": true, + "google.generativeai.protos.ToolConfig.__init__": true, + "google.generativeai.protos.ToolConfig.__le__": true, + "google.generativeai.protos.ToolConfig.__lt__": true, + "google.generativeai.protos.ToolConfig.__ne__": true, + "google.generativeai.protos.ToolConfig.__new__": true, + "google.generativeai.protos.ToolConfig.__or__": true, + "google.generativeai.protos.ToolConfig.__ror__": true, + "google.generativeai.protos.ToolConfig.copy_from": true, + "google.generativeai.protos.ToolConfig.deserialize": true, + "google.generativeai.protos.ToolConfig.from_json": true, + "google.generativeai.protos.ToolConfig.function_calling_config": true, + "google.generativeai.protos.ToolConfig.mro": true, + "google.generativeai.protos.ToolConfig.pb": true, + "google.generativeai.protos.ToolConfig.serialize": true, + "google.generativeai.protos.ToolConfig.to_dict": true, + "google.generativeai.protos.ToolConfig.to_json": true, + "google.generativeai.protos.ToolConfig.wrap": true, + "google.generativeai.protos.TransferOwnershipRequest": false, + "google.generativeai.protos.TransferOwnershipRequest.__call__": true, + 
"google.generativeai.protos.TransferOwnershipRequest.__eq__": true, + "google.generativeai.protos.TransferOwnershipRequest.__ge__": true, + "google.generativeai.protos.TransferOwnershipRequest.__gt__": true, + "google.generativeai.protos.TransferOwnershipRequest.__init__": true, + "google.generativeai.protos.TransferOwnershipRequest.__le__": true, + "google.generativeai.protos.TransferOwnershipRequest.__lt__": true, + "google.generativeai.protos.TransferOwnershipRequest.__ne__": true, + "google.generativeai.protos.TransferOwnershipRequest.__new__": true, + "google.generativeai.protos.TransferOwnershipRequest.__or__": true, + "google.generativeai.protos.TransferOwnershipRequest.__ror__": true, + "google.generativeai.protos.TransferOwnershipRequest.copy_from": true, + "google.generativeai.protos.TransferOwnershipRequest.deserialize": true, + "google.generativeai.protos.TransferOwnershipRequest.email_address": true, + "google.generativeai.protos.TransferOwnershipRequest.from_json": true, + "google.generativeai.protos.TransferOwnershipRequest.mro": true, + "google.generativeai.protos.TransferOwnershipRequest.name": true, + "google.generativeai.protos.TransferOwnershipRequest.pb": true, + "google.generativeai.protos.TransferOwnershipRequest.serialize": true, + "google.generativeai.protos.TransferOwnershipRequest.to_dict": true, + "google.generativeai.protos.TransferOwnershipRequest.to_json": true, + "google.generativeai.protos.TransferOwnershipRequest.wrap": true, + "google.generativeai.protos.TransferOwnershipResponse": false, + "google.generativeai.protos.TransferOwnershipResponse.__call__": true, + "google.generativeai.protos.TransferOwnershipResponse.__eq__": true, + "google.generativeai.protos.TransferOwnershipResponse.__ge__": true, + "google.generativeai.protos.TransferOwnershipResponse.__gt__": true, + "google.generativeai.protos.TransferOwnershipResponse.__init__": true, + "google.generativeai.protos.TransferOwnershipResponse.__le__": true, + 
"google.generativeai.protos.TransferOwnershipResponse.__lt__": true, + "google.generativeai.protos.TransferOwnershipResponse.__ne__": true, + "google.generativeai.protos.TransferOwnershipResponse.__new__": true, + "google.generativeai.protos.TransferOwnershipResponse.__or__": true, + "google.generativeai.protos.TransferOwnershipResponse.__ror__": true, + "google.generativeai.protos.TransferOwnershipResponse.copy_from": true, + "google.generativeai.protos.TransferOwnershipResponse.deserialize": true, + "google.generativeai.protos.TransferOwnershipResponse.from_json": true, + "google.generativeai.protos.TransferOwnershipResponse.mro": true, + "google.generativeai.protos.TransferOwnershipResponse.pb": true, + "google.generativeai.protos.TransferOwnershipResponse.serialize": true, + "google.generativeai.protos.TransferOwnershipResponse.to_dict": true, + "google.generativeai.protos.TransferOwnershipResponse.to_json": true, + "google.generativeai.protos.TransferOwnershipResponse.wrap": true, + "google.generativeai.protos.TunedModel": false, + "google.generativeai.protos.TunedModel.State": false, + "google.generativeai.protos.TunedModel.State.ACTIVE": true, + "google.generativeai.protos.TunedModel.State.CREATING": true, + "google.generativeai.protos.TunedModel.State.FAILED": true, + "google.generativeai.protos.TunedModel.State.STATE_UNSPECIFIED": true, + "google.generativeai.protos.TunedModel.State.__abs__": true, + "google.generativeai.protos.TunedModel.State.__add__": true, + "google.generativeai.protos.TunedModel.State.__and__": true, + "google.generativeai.protos.TunedModel.State.__bool__": true, + "google.generativeai.protos.TunedModel.State.__contains__": true, + "google.generativeai.protos.TunedModel.State.__eq__": true, + "google.generativeai.protos.TunedModel.State.__floordiv__": true, + "google.generativeai.protos.TunedModel.State.__ge__": true, + "google.generativeai.protos.TunedModel.State.__getitem__": true, + 
"google.generativeai.protos.TunedModel.State.__gt__": true, + "google.generativeai.protos.TunedModel.State.__init__": true, + "google.generativeai.protos.TunedModel.State.__invert__": true, + "google.generativeai.protos.TunedModel.State.__iter__": true, + "google.generativeai.protos.TunedModel.State.__le__": true, + "google.generativeai.protos.TunedModel.State.__len__": true, + "google.generativeai.protos.TunedModel.State.__lshift__": true, + "google.generativeai.protos.TunedModel.State.__lt__": true, + "google.generativeai.protos.TunedModel.State.__mod__": true, + "google.generativeai.protos.TunedModel.State.__mul__": true, + "google.generativeai.protos.TunedModel.State.__ne__": true, + "google.generativeai.protos.TunedModel.State.__neg__": true, + "google.generativeai.protos.TunedModel.State.__new__": true, + "google.generativeai.protos.TunedModel.State.__or__": true, + "google.generativeai.protos.TunedModel.State.__pos__": true, + "google.generativeai.protos.TunedModel.State.__pow__": true, + "google.generativeai.protos.TunedModel.State.__radd__": true, + "google.generativeai.protos.TunedModel.State.__rand__": true, + "google.generativeai.protos.TunedModel.State.__rfloordiv__": true, + "google.generativeai.protos.TunedModel.State.__rlshift__": true, + "google.generativeai.protos.TunedModel.State.__rmod__": true, + "google.generativeai.protos.TunedModel.State.__rmul__": true, + "google.generativeai.protos.TunedModel.State.__ror__": true, + "google.generativeai.protos.TunedModel.State.__rpow__": true, + "google.generativeai.protos.TunedModel.State.__rrshift__": true, + "google.generativeai.protos.TunedModel.State.__rshift__": true, + "google.generativeai.protos.TunedModel.State.__rsub__": true, + "google.generativeai.protos.TunedModel.State.__rtruediv__": true, + "google.generativeai.protos.TunedModel.State.__rxor__": true, + "google.generativeai.protos.TunedModel.State.__sub__": true, + "google.generativeai.protos.TunedModel.State.__truediv__": true, + 
"google.generativeai.protos.TunedModel.State.__xor__": true, + "google.generativeai.protos.TunedModel.State.as_integer_ratio": true, + "google.generativeai.protos.TunedModel.State.bit_count": true, + "google.generativeai.protos.TunedModel.State.bit_length": true, + "google.generativeai.protos.TunedModel.State.conjugate": true, + "google.generativeai.protos.TunedModel.State.denominator": true, + "google.generativeai.protos.TunedModel.State.from_bytes": true, + "google.generativeai.protos.TunedModel.State.imag": true, + "google.generativeai.protos.TunedModel.State.numerator": true, + "google.generativeai.protos.TunedModel.State.real": true, + "google.generativeai.protos.TunedModel.State.to_bytes": true, + "google.generativeai.protos.TunedModel.__call__": true, + "google.generativeai.protos.TunedModel.__eq__": true, + "google.generativeai.protos.TunedModel.__ge__": true, + "google.generativeai.protos.TunedModel.__gt__": true, + "google.generativeai.protos.TunedModel.__init__": true, + "google.generativeai.protos.TunedModel.__le__": true, + "google.generativeai.protos.TunedModel.__lt__": true, + "google.generativeai.protos.TunedModel.__ne__": true, + "google.generativeai.protos.TunedModel.__new__": true, + "google.generativeai.protos.TunedModel.__or__": true, + "google.generativeai.protos.TunedModel.__ror__": true, + "google.generativeai.protos.TunedModel.base_model": true, + "google.generativeai.protos.TunedModel.copy_from": true, + "google.generativeai.protos.TunedModel.create_time": true, + "google.generativeai.protos.TunedModel.description": true, + "google.generativeai.protos.TunedModel.deserialize": true, + "google.generativeai.protos.TunedModel.display_name": true, + "google.generativeai.protos.TunedModel.from_json": true, + "google.generativeai.protos.TunedModel.mro": true, + "google.generativeai.protos.TunedModel.name": true, + "google.generativeai.protos.TunedModel.pb": true, + "google.generativeai.protos.TunedModel.serialize": true, + 
"google.generativeai.protos.TunedModel.state": true, + "google.generativeai.protos.TunedModel.temperature": true, + "google.generativeai.protos.TunedModel.to_dict": true, + "google.generativeai.protos.TunedModel.to_json": true, + "google.generativeai.protos.TunedModel.top_k": true, + "google.generativeai.protos.TunedModel.top_p": true, + "google.generativeai.protos.TunedModel.tuned_model_source": true, + "google.generativeai.protos.TunedModel.tuning_task": true, + "google.generativeai.protos.TunedModel.update_time": true, + "google.generativeai.protos.TunedModel.wrap": true, + "google.generativeai.protos.TunedModelSource": false, + "google.generativeai.protos.TunedModelSource.__call__": true, + "google.generativeai.protos.TunedModelSource.__eq__": true, + "google.generativeai.protos.TunedModelSource.__ge__": true, + "google.generativeai.protos.TunedModelSource.__gt__": true, + "google.generativeai.protos.TunedModelSource.__init__": true, + "google.generativeai.protos.TunedModelSource.__le__": true, + "google.generativeai.protos.TunedModelSource.__lt__": true, + "google.generativeai.protos.TunedModelSource.__ne__": true, + "google.generativeai.protos.TunedModelSource.__new__": true, + "google.generativeai.protos.TunedModelSource.__or__": true, + "google.generativeai.protos.TunedModelSource.__ror__": true, + "google.generativeai.protos.TunedModelSource.base_model": true, + "google.generativeai.protos.TunedModelSource.copy_from": true, + "google.generativeai.protos.TunedModelSource.deserialize": true, + "google.generativeai.protos.TunedModelSource.from_json": true, + "google.generativeai.protos.TunedModelSource.mro": true, + "google.generativeai.protos.TunedModelSource.pb": true, + "google.generativeai.protos.TunedModelSource.serialize": true, + "google.generativeai.protos.TunedModelSource.to_dict": true, + "google.generativeai.protos.TunedModelSource.to_json": true, + "google.generativeai.protos.TunedModelSource.tuned_model": true, + 
"google.generativeai.protos.TunedModelSource.wrap": true, + "google.generativeai.protos.TuningExample": false, + "google.generativeai.protos.TuningExample.__call__": true, + "google.generativeai.protos.TuningExample.__eq__": true, + "google.generativeai.protos.TuningExample.__ge__": true, + "google.generativeai.protos.TuningExample.__gt__": true, + "google.generativeai.protos.TuningExample.__init__": true, + "google.generativeai.protos.TuningExample.__le__": true, + "google.generativeai.protos.TuningExample.__lt__": true, + "google.generativeai.protos.TuningExample.__ne__": true, + "google.generativeai.protos.TuningExample.__new__": true, + "google.generativeai.protos.TuningExample.__or__": true, + "google.generativeai.protos.TuningExample.__ror__": true, + "google.generativeai.protos.TuningExample.copy_from": true, + "google.generativeai.protos.TuningExample.deserialize": true, + "google.generativeai.protos.TuningExample.from_json": true, + "google.generativeai.protos.TuningExample.mro": true, + "google.generativeai.protos.TuningExample.output": true, + "google.generativeai.protos.TuningExample.pb": true, + "google.generativeai.protos.TuningExample.serialize": true, + "google.generativeai.protos.TuningExample.text_input": true, + "google.generativeai.protos.TuningExample.to_dict": true, + "google.generativeai.protos.TuningExample.to_json": true, + "google.generativeai.protos.TuningExample.wrap": true, + "google.generativeai.protos.TuningExamples": false, + "google.generativeai.protos.TuningExamples.__call__": true, + "google.generativeai.protos.TuningExamples.__eq__": true, + "google.generativeai.protos.TuningExamples.__ge__": true, + "google.generativeai.protos.TuningExamples.__gt__": true, + "google.generativeai.protos.TuningExamples.__init__": true, + "google.generativeai.protos.TuningExamples.__le__": true, + "google.generativeai.protos.TuningExamples.__lt__": true, + "google.generativeai.protos.TuningExamples.__ne__": true, + 
"google.generativeai.protos.TuningExamples.__new__": true, + "google.generativeai.protos.TuningExamples.__or__": true, + "google.generativeai.protos.TuningExamples.__ror__": true, + "google.generativeai.protos.TuningExamples.copy_from": true, + "google.generativeai.protos.TuningExamples.deserialize": true, + "google.generativeai.protos.TuningExamples.examples": true, + "google.generativeai.protos.TuningExamples.from_json": true, + "google.generativeai.protos.TuningExamples.mro": true, + "google.generativeai.protos.TuningExamples.pb": true, + "google.generativeai.protos.TuningExamples.serialize": true, + "google.generativeai.protos.TuningExamples.to_dict": true, + "google.generativeai.protos.TuningExamples.to_json": true, + "google.generativeai.protos.TuningExamples.wrap": true, + "google.generativeai.protos.TuningSnapshot": false, + "google.generativeai.protos.TuningSnapshot.__call__": true, + "google.generativeai.protos.TuningSnapshot.__eq__": true, + "google.generativeai.protos.TuningSnapshot.__ge__": true, + "google.generativeai.protos.TuningSnapshot.__gt__": true, + "google.generativeai.protos.TuningSnapshot.__init__": true, + "google.generativeai.protos.TuningSnapshot.__le__": true, + "google.generativeai.protos.TuningSnapshot.__lt__": true, + "google.generativeai.protos.TuningSnapshot.__ne__": true, + "google.generativeai.protos.TuningSnapshot.__new__": true, + "google.generativeai.protos.TuningSnapshot.__or__": true, + "google.generativeai.protos.TuningSnapshot.__ror__": true, + "google.generativeai.protos.TuningSnapshot.compute_time": true, + "google.generativeai.protos.TuningSnapshot.copy_from": true, + "google.generativeai.protos.TuningSnapshot.deserialize": true, + "google.generativeai.protos.TuningSnapshot.epoch": true, + "google.generativeai.protos.TuningSnapshot.from_json": true, + "google.generativeai.protos.TuningSnapshot.mean_loss": true, + "google.generativeai.protos.TuningSnapshot.mro": true, + "google.generativeai.protos.TuningSnapshot.pb": 
true, + "google.generativeai.protos.TuningSnapshot.serialize": true, + "google.generativeai.protos.TuningSnapshot.step": true, + "google.generativeai.protos.TuningSnapshot.to_dict": true, + "google.generativeai.protos.TuningSnapshot.to_json": true, + "google.generativeai.protos.TuningSnapshot.wrap": true, + "google.generativeai.protos.TuningTask": false, + "google.generativeai.protos.TuningTask.__call__": true, + "google.generativeai.protos.TuningTask.__eq__": true, + "google.generativeai.protos.TuningTask.__ge__": true, + "google.generativeai.protos.TuningTask.__gt__": true, + "google.generativeai.protos.TuningTask.__init__": true, + "google.generativeai.protos.TuningTask.__le__": true, + "google.generativeai.protos.TuningTask.__lt__": true, + "google.generativeai.protos.TuningTask.__ne__": true, + "google.generativeai.protos.TuningTask.__new__": true, + "google.generativeai.protos.TuningTask.__or__": true, + "google.generativeai.protos.TuningTask.__ror__": true, + "google.generativeai.protos.TuningTask.complete_time": true, + "google.generativeai.protos.TuningTask.copy_from": true, + "google.generativeai.protos.TuningTask.deserialize": true, + "google.generativeai.protos.TuningTask.from_json": true, + "google.generativeai.protos.TuningTask.hyperparameters": true, + "google.generativeai.protos.TuningTask.mro": true, + "google.generativeai.protos.TuningTask.pb": true, + "google.generativeai.protos.TuningTask.serialize": true, + "google.generativeai.protos.TuningTask.snapshots": true, + "google.generativeai.protos.TuningTask.start_time": true, + "google.generativeai.protos.TuningTask.to_dict": true, + "google.generativeai.protos.TuningTask.to_json": true, + "google.generativeai.protos.TuningTask.training_data": true, + "google.generativeai.protos.TuningTask.wrap": true, + "google.generativeai.protos.Type": false, + "google.generativeai.protos.Type.ARRAY": true, + "google.generativeai.protos.Type.BOOLEAN": true, + "google.generativeai.protos.Type.INTEGER": true, + 
"google.generativeai.protos.Type.NUMBER": true, + "google.generativeai.protos.Type.OBJECT": true, + "google.generativeai.protos.Type.STRING": true, + "google.generativeai.protos.Type.TYPE_UNSPECIFIED": true, + "google.generativeai.protos.Type.__abs__": true, + "google.generativeai.protos.Type.__add__": true, + "google.generativeai.protos.Type.__and__": true, + "google.generativeai.protos.Type.__bool__": true, + "google.generativeai.protos.Type.__contains__": true, + "google.generativeai.protos.Type.__eq__": true, + "google.generativeai.protos.Type.__floordiv__": true, + "google.generativeai.protos.Type.__ge__": true, + "google.generativeai.protos.Type.__getitem__": true, + "google.generativeai.protos.Type.__gt__": true, + "google.generativeai.protos.Type.__init__": true, + "google.generativeai.protos.Type.__invert__": true, + "google.generativeai.protos.Type.__iter__": true, + "google.generativeai.protos.Type.__le__": true, + "google.generativeai.protos.Type.__len__": true, + "google.generativeai.protos.Type.__lshift__": true, + "google.generativeai.protos.Type.__lt__": true, + "google.generativeai.protos.Type.__mod__": true, + "google.generativeai.protos.Type.__mul__": true, + "google.generativeai.protos.Type.__ne__": true, + "google.generativeai.protos.Type.__neg__": true, + "google.generativeai.protos.Type.__new__": true, + "google.generativeai.protos.Type.__or__": true, + "google.generativeai.protos.Type.__pos__": true, + "google.generativeai.protos.Type.__pow__": true, + "google.generativeai.protos.Type.__radd__": true, + "google.generativeai.protos.Type.__rand__": true, + "google.generativeai.protos.Type.__rfloordiv__": true, + "google.generativeai.protos.Type.__rlshift__": true, + "google.generativeai.protos.Type.__rmod__": true, + "google.generativeai.protos.Type.__rmul__": true, + "google.generativeai.protos.Type.__ror__": true, + "google.generativeai.protos.Type.__rpow__": true, + "google.generativeai.protos.Type.__rrshift__": true, + 
"google.generativeai.protos.Type.__rshift__": true, + "google.generativeai.protos.Type.__rsub__": true, + "google.generativeai.protos.Type.__rtruediv__": true, + "google.generativeai.protos.Type.__rxor__": true, + "google.generativeai.protos.Type.__sub__": true, + "google.generativeai.protos.Type.__truediv__": true, + "google.generativeai.protos.Type.__xor__": true, + "google.generativeai.protos.Type.as_integer_ratio": true, + "google.generativeai.protos.Type.bit_count": true, + "google.generativeai.protos.Type.bit_length": true, + "google.generativeai.protos.Type.conjugate": true, + "google.generativeai.protos.Type.denominator": true, + "google.generativeai.protos.Type.from_bytes": true, + "google.generativeai.protos.Type.imag": true, + "google.generativeai.protos.Type.numerator": true, + "google.generativeai.protos.Type.real": true, + "google.generativeai.protos.Type.to_bytes": true, + "google.generativeai.protos.UpdateCachedContentRequest": false, + "google.generativeai.protos.UpdateCachedContentRequest.__call__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__eq__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__ge__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__gt__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__init__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__le__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__lt__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__ne__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__new__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__or__": true, + "google.generativeai.protos.UpdateCachedContentRequest.__ror__": true, + "google.generativeai.protos.UpdateCachedContentRequest.cached_content": true, + "google.generativeai.protos.UpdateCachedContentRequest.copy_from": true, + "google.generativeai.protos.UpdateCachedContentRequest.deserialize": true, + 
"google.generativeai.protos.UpdateCachedContentRequest.from_json": true, + "google.generativeai.protos.UpdateCachedContentRequest.mro": true, + "google.generativeai.protos.UpdateCachedContentRequest.pb": true, + "google.generativeai.protos.UpdateCachedContentRequest.serialize": true, + "google.generativeai.protos.UpdateCachedContentRequest.to_dict": true, + "google.generativeai.protos.UpdateCachedContentRequest.to_json": true, + "google.generativeai.protos.UpdateCachedContentRequest.update_mask": true, + "google.generativeai.protos.UpdateCachedContentRequest.wrap": true, + "google.generativeai.protos.UpdateChunkRequest": false, + "google.generativeai.protos.UpdateChunkRequest.__call__": true, + "google.generativeai.protos.UpdateChunkRequest.__eq__": true, + "google.generativeai.protos.UpdateChunkRequest.__ge__": true, + "google.generativeai.protos.UpdateChunkRequest.__gt__": true, + "google.generativeai.protos.UpdateChunkRequest.__init__": true, + "google.generativeai.protos.UpdateChunkRequest.__le__": true, + "google.generativeai.protos.UpdateChunkRequest.__lt__": true, + "google.generativeai.protos.UpdateChunkRequest.__ne__": true, + "google.generativeai.protos.UpdateChunkRequest.__new__": true, + "google.generativeai.protos.UpdateChunkRequest.__or__": true, + "google.generativeai.protos.UpdateChunkRequest.__ror__": true, + "google.generativeai.protos.UpdateChunkRequest.chunk": true, + "google.generativeai.protos.UpdateChunkRequest.copy_from": true, + "google.generativeai.protos.UpdateChunkRequest.deserialize": true, + "google.generativeai.protos.UpdateChunkRequest.from_json": true, + "google.generativeai.protos.UpdateChunkRequest.mro": true, + "google.generativeai.protos.UpdateChunkRequest.pb": true, + "google.generativeai.protos.UpdateChunkRequest.serialize": true, + "google.generativeai.protos.UpdateChunkRequest.to_dict": true, + "google.generativeai.protos.UpdateChunkRequest.to_json": true, + "google.generativeai.protos.UpdateChunkRequest.update_mask": true, 
+ "google.generativeai.protos.UpdateChunkRequest.wrap": true, + "google.generativeai.protos.UpdateCorpusRequest": false, + "google.generativeai.protos.UpdateCorpusRequest.__call__": true, + "google.generativeai.protos.UpdateCorpusRequest.__eq__": true, + "google.generativeai.protos.UpdateCorpusRequest.__ge__": true, + "google.generativeai.protos.UpdateCorpusRequest.__gt__": true, + "google.generativeai.protos.UpdateCorpusRequest.__init__": true, + "google.generativeai.protos.UpdateCorpusRequest.__le__": true, + "google.generativeai.protos.UpdateCorpusRequest.__lt__": true, + "google.generativeai.protos.UpdateCorpusRequest.__ne__": true, + "google.generativeai.protos.UpdateCorpusRequest.__new__": true, + "google.generativeai.protos.UpdateCorpusRequest.__or__": true, + "google.generativeai.protos.UpdateCorpusRequest.__ror__": true, + "google.generativeai.protos.UpdateCorpusRequest.copy_from": true, + "google.generativeai.protos.UpdateCorpusRequest.corpus": true, + "google.generativeai.protos.UpdateCorpusRequest.deserialize": true, + "google.generativeai.protos.UpdateCorpusRequest.from_json": true, + "google.generativeai.protos.UpdateCorpusRequest.mro": true, + "google.generativeai.protos.UpdateCorpusRequest.pb": true, + "google.generativeai.protos.UpdateCorpusRequest.serialize": true, + "google.generativeai.protos.UpdateCorpusRequest.to_dict": true, + "google.generativeai.protos.UpdateCorpusRequest.to_json": true, + "google.generativeai.protos.UpdateCorpusRequest.update_mask": true, + "google.generativeai.protos.UpdateCorpusRequest.wrap": true, + "google.generativeai.protos.UpdateDocumentRequest": false, + "google.generativeai.protos.UpdateDocumentRequest.__call__": true, + "google.generativeai.protos.UpdateDocumentRequest.__eq__": true, + "google.generativeai.protos.UpdateDocumentRequest.__ge__": true, + "google.generativeai.protos.UpdateDocumentRequest.__gt__": true, + "google.generativeai.protos.UpdateDocumentRequest.__init__": true, + 
"google.generativeai.protos.UpdateDocumentRequest.__le__": true, + "google.generativeai.protos.UpdateDocumentRequest.__lt__": true, + "google.generativeai.protos.UpdateDocumentRequest.__ne__": true, + "google.generativeai.protos.UpdateDocumentRequest.__new__": true, + "google.generativeai.protos.UpdateDocumentRequest.__or__": true, + "google.generativeai.protos.UpdateDocumentRequest.__ror__": true, + "google.generativeai.protos.UpdateDocumentRequest.copy_from": true, + "google.generativeai.protos.UpdateDocumentRequest.deserialize": true, + "google.generativeai.protos.UpdateDocumentRequest.document": true, + "google.generativeai.protos.UpdateDocumentRequest.from_json": true, + "google.generativeai.protos.UpdateDocumentRequest.mro": true, + "google.generativeai.protos.UpdateDocumentRequest.pb": true, + "google.generativeai.protos.UpdateDocumentRequest.serialize": true, + "google.generativeai.protos.UpdateDocumentRequest.to_dict": true, + "google.generativeai.protos.UpdateDocumentRequest.to_json": true, + "google.generativeai.protos.UpdateDocumentRequest.update_mask": true, + "google.generativeai.protos.UpdateDocumentRequest.wrap": true, + "google.generativeai.protos.UpdatePermissionRequest": false, + "google.generativeai.protos.UpdatePermissionRequest.__call__": true, + "google.generativeai.protos.UpdatePermissionRequest.__eq__": true, + "google.generativeai.protos.UpdatePermissionRequest.__ge__": true, + "google.generativeai.protos.UpdatePermissionRequest.__gt__": true, + "google.generativeai.protos.UpdatePermissionRequest.__init__": true, + "google.generativeai.protos.UpdatePermissionRequest.__le__": true, + "google.generativeai.protos.UpdatePermissionRequest.__lt__": true, + "google.generativeai.protos.UpdatePermissionRequest.__ne__": true, + "google.generativeai.protos.UpdatePermissionRequest.__new__": true, + "google.generativeai.protos.UpdatePermissionRequest.__or__": true, + "google.generativeai.protos.UpdatePermissionRequest.__ror__": true, + 
"google.generativeai.protos.UpdatePermissionRequest.copy_from": true, + "google.generativeai.protos.UpdatePermissionRequest.deserialize": true, + "google.generativeai.protos.UpdatePermissionRequest.from_json": true, + "google.generativeai.protos.UpdatePermissionRequest.mro": true, + "google.generativeai.protos.UpdatePermissionRequest.pb": true, + "google.generativeai.protos.UpdatePermissionRequest.permission": true, + "google.generativeai.protos.UpdatePermissionRequest.serialize": true, + "google.generativeai.protos.UpdatePermissionRequest.to_dict": true, + "google.generativeai.protos.UpdatePermissionRequest.to_json": true, + "google.generativeai.protos.UpdatePermissionRequest.update_mask": true, + "google.generativeai.protos.UpdatePermissionRequest.wrap": true, + "google.generativeai.protos.UpdateTunedModelRequest": false, + "google.generativeai.protos.UpdateTunedModelRequest.__call__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__eq__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__ge__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__gt__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__init__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__le__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__lt__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__ne__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__new__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__or__": true, + "google.generativeai.protos.UpdateTunedModelRequest.__ror__": true, + "google.generativeai.protos.UpdateTunedModelRequest.copy_from": true, + "google.generativeai.protos.UpdateTunedModelRequest.deserialize": true, + "google.generativeai.protos.UpdateTunedModelRequest.from_json": true, + "google.generativeai.protos.UpdateTunedModelRequest.mro": true, + "google.generativeai.protos.UpdateTunedModelRequest.pb": true, + 
"google.generativeai.protos.UpdateTunedModelRequest.serialize": true, + "google.generativeai.protos.UpdateTunedModelRequest.to_dict": true, + "google.generativeai.protos.UpdateTunedModelRequest.to_json": true, + "google.generativeai.protos.UpdateTunedModelRequest.tuned_model": true, + "google.generativeai.protos.UpdateTunedModelRequest.update_mask": true, + "google.generativeai.protos.UpdateTunedModelRequest.wrap": true, + "google.generativeai.protos.VideoMetadata": false, + "google.generativeai.protos.VideoMetadata.__call__": true, + "google.generativeai.protos.VideoMetadata.__eq__": true, + "google.generativeai.protos.VideoMetadata.__ge__": true, + "google.generativeai.protos.VideoMetadata.__gt__": true, + "google.generativeai.protos.VideoMetadata.__init__": true, + "google.generativeai.protos.VideoMetadata.__le__": true, + "google.generativeai.protos.VideoMetadata.__lt__": true, + "google.generativeai.protos.VideoMetadata.__ne__": true, + "google.generativeai.protos.VideoMetadata.__new__": true, + "google.generativeai.protos.VideoMetadata.__or__": true, + "google.generativeai.protos.VideoMetadata.__ror__": true, + "google.generativeai.protos.VideoMetadata.copy_from": true, + "google.generativeai.protos.VideoMetadata.deserialize": true, + "google.generativeai.protos.VideoMetadata.from_json": true, + "google.generativeai.protos.VideoMetadata.mro": true, + "google.generativeai.protos.VideoMetadata.pb": true, + "google.generativeai.protos.VideoMetadata.serialize": true, + "google.generativeai.protos.VideoMetadata.to_dict": true, + "google.generativeai.protos.VideoMetadata.to_json": true, + "google.generativeai.protos.VideoMetadata.video_duration": true, + "google.generativeai.protos.VideoMetadata.wrap": true, + "google.generativeai.types": false, + "google.generativeai.types.AnyModelNameOptions": false, + "google.generativeai.types.AsyncGenerateContentResponse": false, + "google.generativeai.types.AsyncGenerateContentResponse.__eq__": true, + 
"google.generativeai.types.AsyncGenerateContentResponse.__ge__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__gt__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__init__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__le__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__lt__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__ne__": true, + "google.generativeai.types.AsyncGenerateContentResponse.__new__": true, + "google.generativeai.types.AsyncGenerateContentResponse.candidates": true, + "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": true, + "google.generativeai.types.AsyncGenerateContentResponse.from_response": true, + "google.generativeai.types.AsyncGenerateContentResponse.parts": true, + "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback": true, + "google.generativeai.types.AsyncGenerateContentResponse.resolve": true, + "google.generativeai.types.AsyncGenerateContentResponse.text": true, + "google.generativeai.types.AsyncGenerateContentResponse.to_dict": true, + "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata": true, + "google.generativeai.types.AuthorError": false, + "google.generativeai.types.AuthorError.__eq__": true, + "google.generativeai.types.AuthorError.__ge__": true, + "google.generativeai.types.AuthorError.__gt__": true, + "google.generativeai.types.AuthorError.__init__": true, + "google.generativeai.types.AuthorError.__le__": true, + "google.generativeai.types.AuthorError.__lt__": true, + "google.generativeai.types.AuthorError.__ne__": true, + "google.generativeai.types.AuthorError.__new__": true, + "google.generativeai.types.AuthorError.add_note": true, + "google.generativeai.types.AuthorError.args": true, + "google.generativeai.types.AuthorError.with_traceback": true, + "google.generativeai.types.BaseModelNameOptions": false, + "google.generativeai.types.BlobDict": false, + 
"google.generativeai.types.BlobDict.__contains__": true, + "google.generativeai.types.BlobDict.__eq__": true, + "google.generativeai.types.BlobDict.__ge__": true, + "google.generativeai.types.BlobDict.__getitem__": true, + "google.generativeai.types.BlobDict.__gt__": true, + "google.generativeai.types.BlobDict.__init__": true, + "google.generativeai.types.BlobDict.__iter__": true, + "google.generativeai.types.BlobDict.__le__": true, + "google.generativeai.types.BlobDict.__len__": true, + "google.generativeai.types.BlobDict.__lt__": true, + "google.generativeai.types.BlobDict.__ne__": true, + "google.generativeai.types.BlobDict.__new__": true, + "google.generativeai.types.BlobDict.__or__": true, + "google.generativeai.types.BlobDict.__ror__": true, + "google.generativeai.types.BlobDict.clear": true, + "google.generativeai.types.BlobDict.copy": true, + "google.generativeai.types.BlobDict.fromkeys": true, + "google.generativeai.types.BlobDict.get": true, + "google.generativeai.types.BlobDict.items": true, + "google.generativeai.types.BlobDict.keys": true, + "google.generativeai.types.BlobDict.pop": true, + "google.generativeai.types.BlobDict.popitem": true, + "google.generativeai.types.BlobDict.setdefault": true, + "google.generativeai.types.BlobDict.update": true, + "google.generativeai.types.BlobDict.values": true, + "google.generativeai.types.BlobType": false, + "google.generativeai.types.BlockedPromptException": false, + "google.generativeai.types.BlockedPromptException.__eq__": true, + "google.generativeai.types.BlockedPromptException.__ge__": true, + "google.generativeai.types.BlockedPromptException.__gt__": true, + "google.generativeai.types.BlockedPromptException.__init__": true, + "google.generativeai.types.BlockedPromptException.__le__": true, + "google.generativeai.types.BlockedPromptException.__lt__": true, + "google.generativeai.types.BlockedPromptException.__ne__": true, + "google.generativeai.types.BlockedPromptException.__new__": true, + 
"google.generativeai.types.BlockedPromptException.add_note": true, + "google.generativeai.types.BlockedPromptException.args": true, + "google.generativeai.types.BlockedPromptException.with_traceback": true, + "google.generativeai.types.BlockedReason": false, + "google.generativeai.types.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true, + "google.generativeai.types.BlockedReason.OTHER": true, + "google.generativeai.types.BlockedReason.SAFETY": true, + "google.generativeai.types.BlockedReason.__abs__": true, + "google.generativeai.types.BlockedReason.__add__": true, + "google.generativeai.types.BlockedReason.__and__": true, + "google.generativeai.types.BlockedReason.__bool__": true, + "google.generativeai.types.BlockedReason.__contains__": true, + "google.generativeai.types.BlockedReason.__eq__": true, + "google.generativeai.types.BlockedReason.__floordiv__": true, + "google.generativeai.types.BlockedReason.__ge__": true, + "google.generativeai.types.BlockedReason.__getitem__": true, + "google.generativeai.types.BlockedReason.__gt__": true, + "google.generativeai.types.BlockedReason.__init__": true, + "google.generativeai.types.BlockedReason.__invert__": true, + "google.generativeai.types.BlockedReason.__iter__": true, + "google.generativeai.types.BlockedReason.__le__": true, + "google.generativeai.types.BlockedReason.__len__": true, + "google.generativeai.types.BlockedReason.__lshift__": true, + "google.generativeai.types.BlockedReason.__lt__": true, + "google.generativeai.types.BlockedReason.__mod__": true, + "google.generativeai.types.BlockedReason.__mul__": true, + "google.generativeai.types.BlockedReason.__ne__": true, + "google.generativeai.types.BlockedReason.__neg__": true, + "google.generativeai.types.BlockedReason.__new__": true, + "google.generativeai.types.BlockedReason.__or__": true, + "google.generativeai.types.BlockedReason.__pos__": true, + "google.generativeai.types.BlockedReason.__pow__": true, + "google.generativeai.types.BlockedReason.__radd__": 
true, + "google.generativeai.types.BlockedReason.__rand__": true, + "google.generativeai.types.BlockedReason.__rfloordiv__": true, + "google.generativeai.types.BlockedReason.__rlshift__": true, + "google.generativeai.types.BlockedReason.__rmod__": true, + "google.generativeai.types.BlockedReason.__rmul__": true, + "google.generativeai.types.BlockedReason.__ror__": true, + "google.generativeai.types.BlockedReason.__rpow__": true, + "google.generativeai.types.BlockedReason.__rrshift__": true, + "google.generativeai.types.BlockedReason.__rshift__": true, + "google.generativeai.types.BlockedReason.__rsub__": true, + "google.generativeai.types.BlockedReason.__rtruediv__": true, + "google.generativeai.types.BlockedReason.__rxor__": true, + "google.generativeai.types.BlockedReason.__sub__": true, + "google.generativeai.types.BlockedReason.__truediv__": true, + "google.generativeai.types.BlockedReason.__xor__": true, + "google.generativeai.types.BlockedReason.as_integer_ratio": true, + "google.generativeai.types.BlockedReason.bit_count": true, + "google.generativeai.types.BlockedReason.bit_length": true, + "google.generativeai.types.BlockedReason.conjugate": true, + "google.generativeai.types.BlockedReason.denominator": true, + "google.generativeai.types.BlockedReason.from_bytes": true, + "google.generativeai.types.BlockedReason.imag": true, + "google.generativeai.types.BlockedReason.numerator": true, + "google.generativeai.types.BlockedReason.real": true, + "google.generativeai.types.BlockedReason.to_bytes": true, + "google.generativeai.types.BrokenResponseError": false, + "google.generativeai.types.BrokenResponseError.__eq__": true, + "google.generativeai.types.BrokenResponseError.__ge__": true, + "google.generativeai.types.BrokenResponseError.__gt__": true, + "google.generativeai.types.BrokenResponseError.__init__": true, + "google.generativeai.types.BrokenResponseError.__le__": true, + "google.generativeai.types.BrokenResponseError.__lt__": true, + 
"google.generativeai.types.BrokenResponseError.__ne__": true, + "google.generativeai.types.BrokenResponseError.__new__": true, + "google.generativeai.types.BrokenResponseError.add_note": true, + "google.generativeai.types.BrokenResponseError.args": true, + "google.generativeai.types.BrokenResponseError.with_traceback": true, + "google.generativeai.types.CallableFunctionDeclaration": false, + "google.generativeai.types.CallableFunctionDeclaration.__call__": true, + "google.generativeai.types.CallableFunctionDeclaration.__eq__": true, + "google.generativeai.types.CallableFunctionDeclaration.__ge__": true, + "google.generativeai.types.CallableFunctionDeclaration.__gt__": true, + "google.generativeai.types.CallableFunctionDeclaration.__init__": true, + "google.generativeai.types.CallableFunctionDeclaration.__le__": true, + "google.generativeai.types.CallableFunctionDeclaration.__lt__": true, + "google.generativeai.types.CallableFunctionDeclaration.__ne__": true, + "google.generativeai.types.CallableFunctionDeclaration.__new__": true, + "google.generativeai.types.CallableFunctionDeclaration.description": true, + "google.generativeai.types.CallableFunctionDeclaration.from_function": true, + "google.generativeai.types.CallableFunctionDeclaration.from_proto": true, + "google.generativeai.types.CallableFunctionDeclaration.name": true, + "google.generativeai.types.CallableFunctionDeclaration.parameters": true, + "google.generativeai.types.CallableFunctionDeclaration.to_proto": true, + "google.generativeai.types.ChatResponse": false, + "google.generativeai.types.ChatResponse.__eq__": true, + "google.generativeai.types.ChatResponse.__ge__": true, + "google.generativeai.types.ChatResponse.__gt__": true, + "google.generativeai.types.ChatResponse.__init__": true, + "google.generativeai.types.ChatResponse.__le__": true, + "google.generativeai.types.ChatResponse.__lt__": true, + "google.generativeai.types.ChatResponse.__ne__": true, + 
"google.generativeai.types.ChatResponse.__new__": true, + "google.generativeai.types.ChatResponse.last": true, + "google.generativeai.types.ChatResponse.reply": true, + "google.generativeai.types.ChatResponse.to_dict": true, + "google.generativeai.types.ChatResponse.top_k": true, + "google.generativeai.types.ChatResponse.top_p": true, + "google.generativeai.types.CitationMetadataDict": false, + "google.generativeai.types.CitationMetadataDict.__contains__": true, + "google.generativeai.types.CitationMetadataDict.__eq__": true, + "google.generativeai.types.CitationMetadataDict.__ge__": true, + "google.generativeai.types.CitationMetadataDict.__getitem__": true, + "google.generativeai.types.CitationMetadataDict.__gt__": true, + "google.generativeai.types.CitationMetadataDict.__init__": true, + "google.generativeai.types.CitationMetadataDict.__iter__": true, + "google.generativeai.types.CitationMetadataDict.__le__": true, + "google.generativeai.types.CitationMetadataDict.__len__": true, + "google.generativeai.types.CitationMetadataDict.__lt__": true, + "google.generativeai.types.CitationMetadataDict.__ne__": true, + "google.generativeai.types.CitationMetadataDict.__new__": true, + "google.generativeai.types.CitationMetadataDict.__or__": true, + "google.generativeai.types.CitationMetadataDict.__ror__": true, + "google.generativeai.types.CitationMetadataDict.clear": true, + "google.generativeai.types.CitationMetadataDict.copy": true, + "google.generativeai.types.CitationMetadataDict.fromkeys": true, + "google.generativeai.types.CitationMetadataDict.get": true, + "google.generativeai.types.CitationMetadataDict.items": true, + "google.generativeai.types.CitationMetadataDict.keys": true, + "google.generativeai.types.CitationMetadataDict.pop": true, + "google.generativeai.types.CitationMetadataDict.popitem": true, + "google.generativeai.types.CitationMetadataDict.setdefault": true, + "google.generativeai.types.CitationMetadataDict.update": true, + 
"google.generativeai.types.CitationMetadataDict.values": true, + "google.generativeai.types.CitationSourceDict": false, + "google.generativeai.types.CitationSourceDict.__contains__": true, + "google.generativeai.types.CitationSourceDict.__eq__": true, + "google.generativeai.types.CitationSourceDict.__ge__": true, + "google.generativeai.types.CitationSourceDict.__getitem__": true, + "google.generativeai.types.CitationSourceDict.__gt__": true, + "google.generativeai.types.CitationSourceDict.__init__": true, + "google.generativeai.types.CitationSourceDict.__iter__": true, + "google.generativeai.types.CitationSourceDict.__le__": true, + "google.generativeai.types.CitationSourceDict.__len__": true, + "google.generativeai.types.CitationSourceDict.__lt__": true, + "google.generativeai.types.CitationSourceDict.__ne__": true, + "google.generativeai.types.CitationSourceDict.__new__": true, + "google.generativeai.types.CitationSourceDict.__or__": true, + "google.generativeai.types.CitationSourceDict.__ror__": true, + "google.generativeai.types.CitationSourceDict.clear": true, + "google.generativeai.types.CitationSourceDict.copy": true, + "google.generativeai.types.CitationSourceDict.fromkeys": true, + "google.generativeai.types.CitationSourceDict.get": true, + "google.generativeai.types.CitationSourceDict.items": true, + "google.generativeai.types.CitationSourceDict.keys": true, + "google.generativeai.types.CitationSourceDict.pop": true, + "google.generativeai.types.CitationSourceDict.popitem": true, + "google.generativeai.types.CitationSourceDict.setdefault": true, + "google.generativeai.types.CitationSourceDict.update": true, + "google.generativeai.types.CitationSourceDict.values": true, + "google.generativeai.types.Completion": false, + "google.generativeai.types.Completion.__eq__": true, + "google.generativeai.types.Completion.__ge__": true, + "google.generativeai.types.Completion.__gt__": true, + "google.generativeai.types.Completion.__init__": true, + 
"google.generativeai.types.Completion.__le__": true, + "google.generativeai.types.Completion.__lt__": true, + "google.generativeai.types.Completion.__ne__": true, + "google.generativeai.types.Completion.__new__": true, + "google.generativeai.types.Completion.to_dict": true, + "google.generativeai.types.ContentDict": false, + "google.generativeai.types.ContentDict.__contains__": true, + "google.generativeai.types.ContentDict.__eq__": true, + "google.generativeai.types.ContentDict.__ge__": true, + "google.generativeai.types.ContentDict.__getitem__": true, + "google.generativeai.types.ContentDict.__gt__": true, + "google.generativeai.types.ContentDict.__init__": true, + "google.generativeai.types.ContentDict.__iter__": true, + "google.generativeai.types.ContentDict.__le__": true, + "google.generativeai.types.ContentDict.__len__": true, + "google.generativeai.types.ContentDict.__lt__": true, + "google.generativeai.types.ContentDict.__ne__": true, + "google.generativeai.types.ContentDict.__new__": true, + "google.generativeai.types.ContentDict.__or__": true, + "google.generativeai.types.ContentDict.__ror__": true, + "google.generativeai.types.ContentDict.clear": true, + "google.generativeai.types.ContentDict.copy": true, + "google.generativeai.types.ContentDict.fromkeys": true, + "google.generativeai.types.ContentDict.get": true, + "google.generativeai.types.ContentDict.items": true, + "google.generativeai.types.ContentDict.keys": true, + "google.generativeai.types.ContentDict.pop": true, + "google.generativeai.types.ContentDict.popitem": true, + "google.generativeai.types.ContentDict.setdefault": true, + "google.generativeai.types.ContentDict.update": true, + "google.generativeai.types.ContentDict.values": true, + "google.generativeai.types.ContentFilterDict": false, + "google.generativeai.types.ContentFilterDict.__contains__": true, + "google.generativeai.types.ContentFilterDict.__eq__": true, + "google.generativeai.types.ContentFilterDict.__ge__": true, + 
"google.generativeai.types.ContentFilterDict.__getitem__": true, + "google.generativeai.types.ContentFilterDict.__gt__": true, + "google.generativeai.types.ContentFilterDict.__init__": true, + "google.generativeai.types.ContentFilterDict.__iter__": true, + "google.generativeai.types.ContentFilterDict.__le__": true, + "google.generativeai.types.ContentFilterDict.__len__": true, + "google.generativeai.types.ContentFilterDict.__lt__": true, + "google.generativeai.types.ContentFilterDict.__ne__": true, + "google.generativeai.types.ContentFilterDict.__new__": true, + "google.generativeai.types.ContentFilterDict.__or__": true, + "google.generativeai.types.ContentFilterDict.__ror__": true, + "google.generativeai.types.ContentFilterDict.clear": true, + "google.generativeai.types.ContentFilterDict.copy": true, + "google.generativeai.types.ContentFilterDict.fromkeys": true, + "google.generativeai.types.ContentFilterDict.get": true, + "google.generativeai.types.ContentFilterDict.items": true, + "google.generativeai.types.ContentFilterDict.keys": true, + "google.generativeai.types.ContentFilterDict.pop": true, + "google.generativeai.types.ContentFilterDict.popitem": true, + "google.generativeai.types.ContentFilterDict.setdefault": true, + "google.generativeai.types.ContentFilterDict.update": true, + "google.generativeai.types.ContentFilterDict.values": true, + "google.generativeai.types.ContentType": false, + "google.generativeai.types.ContentsType": false, + "google.generativeai.types.ExampleDict": false, + "google.generativeai.types.ExampleDict.__contains__": true, + "google.generativeai.types.ExampleDict.__eq__": true, + "google.generativeai.types.ExampleDict.__ge__": true, + "google.generativeai.types.ExampleDict.__getitem__": true, + "google.generativeai.types.ExampleDict.__gt__": true, + "google.generativeai.types.ExampleDict.__init__": true, + "google.generativeai.types.ExampleDict.__iter__": true, + "google.generativeai.types.ExampleDict.__le__": true, + 
"google.generativeai.types.ExampleDict.__len__": true, + "google.generativeai.types.ExampleDict.__lt__": true, + "google.generativeai.types.ExampleDict.__ne__": true, + "google.generativeai.types.ExampleDict.__new__": true, + "google.generativeai.types.ExampleDict.__or__": true, + "google.generativeai.types.ExampleDict.__ror__": true, + "google.generativeai.types.ExampleDict.clear": true, + "google.generativeai.types.ExampleDict.copy": true, + "google.generativeai.types.ExampleDict.fromkeys": true, + "google.generativeai.types.ExampleDict.get": true, + "google.generativeai.types.ExampleDict.items": true, + "google.generativeai.types.ExampleDict.keys": true, + "google.generativeai.types.ExampleDict.pop": true, + "google.generativeai.types.ExampleDict.popitem": true, + "google.generativeai.types.ExampleDict.setdefault": true, + "google.generativeai.types.ExampleDict.update": true, + "google.generativeai.types.ExampleDict.values": true, + "google.generativeai.types.ExampleOptions": false, + "google.generativeai.types.ExamplesOptions": false, + "google.generativeai.types.File": false, + "google.generativeai.types.File.__eq__": true, + "google.generativeai.types.File.__ge__": true, + "google.generativeai.types.File.__gt__": true, + "google.generativeai.types.File.__init__": true, + "google.generativeai.types.File.__le__": true, + "google.generativeai.types.File.__lt__": true, + "google.generativeai.types.File.__ne__": true, + "google.generativeai.types.File.__new__": true, + "google.generativeai.types.File.create_time": true, + "google.generativeai.types.File.delete": true, + "google.generativeai.types.File.display_name": true, + "google.generativeai.types.File.error": true, + "google.generativeai.types.File.expiration_time": true, + "google.generativeai.types.File.mime_type": true, + "google.generativeai.types.File.name": true, + "google.generativeai.types.File.sha256_hash": true, + "google.generativeai.types.File.size_bytes": true, + 
"google.generativeai.types.File.state": true, + "google.generativeai.types.File.to_dict": true, + "google.generativeai.types.File.to_proto": true, + "google.generativeai.types.File.update_time": true, + "google.generativeai.types.File.uri": true, + "google.generativeai.types.File.video_metadata": true, + "google.generativeai.types.FileDataDict": false, + "google.generativeai.types.FileDataDict.__contains__": true, + "google.generativeai.types.FileDataDict.__eq__": true, + "google.generativeai.types.FileDataDict.__ge__": true, + "google.generativeai.types.FileDataDict.__getitem__": true, + "google.generativeai.types.FileDataDict.__gt__": true, + "google.generativeai.types.FileDataDict.__init__": true, + "google.generativeai.types.FileDataDict.__iter__": true, + "google.generativeai.types.FileDataDict.__le__": true, + "google.generativeai.types.FileDataDict.__len__": true, + "google.generativeai.types.FileDataDict.__lt__": true, + "google.generativeai.types.FileDataDict.__ne__": true, + "google.generativeai.types.FileDataDict.__new__": true, + "google.generativeai.types.FileDataDict.__or__": true, + "google.generativeai.types.FileDataDict.__ror__": true, + "google.generativeai.types.FileDataDict.clear": true, + "google.generativeai.types.FileDataDict.copy": true, + "google.generativeai.types.FileDataDict.fromkeys": true, + "google.generativeai.types.FileDataDict.get": true, + "google.generativeai.types.FileDataDict.items": true, + "google.generativeai.types.FileDataDict.keys": true, + "google.generativeai.types.FileDataDict.pop": true, + "google.generativeai.types.FileDataDict.popitem": true, + "google.generativeai.types.FileDataDict.setdefault": true, + "google.generativeai.types.FileDataDict.update": true, + "google.generativeai.types.FileDataDict.values": true, + "google.generativeai.types.FileDataType": false, + "google.generativeai.types.FunctionDeclaration": false, + "google.generativeai.types.FunctionDeclaration.__eq__": true, + 
"google.generativeai.types.FunctionDeclaration.__ge__": true, + "google.generativeai.types.FunctionDeclaration.__gt__": true, + "google.generativeai.types.FunctionDeclaration.__init__": true, + "google.generativeai.types.FunctionDeclaration.__le__": true, + "google.generativeai.types.FunctionDeclaration.__lt__": true, + "google.generativeai.types.FunctionDeclaration.__ne__": true, + "google.generativeai.types.FunctionDeclaration.__new__": true, + "google.generativeai.types.FunctionDeclaration.description": true, + "google.generativeai.types.FunctionDeclaration.from_function": true, + "google.generativeai.types.FunctionDeclaration.from_proto": true, + "google.generativeai.types.FunctionDeclaration.name": true, + "google.generativeai.types.FunctionDeclaration.parameters": true, + "google.generativeai.types.FunctionDeclaration.to_proto": true, + "google.generativeai.types.FunctionDeclarationType": false, + "google.generativeai.types.FunctionLibrary": false, + "google.generativeai.types.FunctionLibrary.__call__": true, + "google.generativeai.types.FunctionLibrary.__eq__": true, + "google.generativeai.types.FunctionLibrary.__ge__": true, + "google.generativeai.types.FunctionLibrary.__getitem__": true, + "google.generativeai.types.FunctionLibrary.__gt__": true, + "google.generativeai.types.FunctionLibrary.__init__": true, + "google.generativeai.types.FunctionLibrary.__le__": true, + "google.generativeai.types.FunctionLibrary.__lt__": true, + "google.generativeai.types.FunctionLibrary.__ne__": true, + "google.generativeai.types.FunctionLibrary.__new__": true, + "google.generativeai.types.FunctionLibrary.to_proto": true, + "google.generativeai.types.FunctionLibraryType": false, + "google.generativeai.types.GenerateContentResponse": false, + "google.generativeai.types.GenerateContentResponse.__eq__": true, + "google.generativeai.types.GenerateContentResponse.__ge__": true, + "google.generativeai.types.GenerateContentResponse.__gt__": true, + 
"google.generativeai.types.GenerateContentResponse.__init__": true, + "google.generativeai.types.GenerateContentResponse.__iter__": true, + "google.generativeai.types.GenerateContentResponse.__le__": true, + "google.generativeai.types.GenerateContentResponse.__lt__": true, + "google.generativeai.types.GenerateContentResponse.__ne__": true, + "google.generativeai.types.GenerateContentResponse.__new__": true, + "google.generativeai.types.GenerateContentResponse.candidates": true, + "google.generativeai.types.GenerateContentResponse.from_iterator": true, + "google.generativeai.types.GenerateContentResponse.from_response": true, + "google.generativeai.types.GenerateContentResponse.parts": true, + "google.generativeai.types.GenerateContentResponse.prompt_feedback": true, + "google.generativeai.types.GenerateContentResponse.resolve": true, + "google.generativeai.types.GenerateContentResponse.text": true, + "google.generativeai.types.GenerateContentResponse.to_dict": true, + "google.generativeai.types.GenerateContentResponse.usage_metadata": true, + "google.generativeai.types.GenerationConfig": false, + "google.generativeai.types.GenerationConfig.__eq__": true, + "google.generativeai.types.GenerationConfig.__ge__": true, + "google.generativeai.types.GenerationConfig.__gt__": true, + "google.generativeai.types.GenerationConfig.__init__": true, + "google.generativeai.types.GenerationConfig.__le__": true, + "google.generativeai.types.GenerationConfig.__lt__": true, + "google.generativeai.types.GenerationConfig.__ne__": true, + "google.generativeai.types.GenerationConfig.__new__": true, + "google.generativeai.types.GenerationConfig.candidate_count": true, + "google.generativeai.types.GenerationConfig.max_output_tokens": true, + "google.generativeai.types.GenerationConfig.response_mime_type": true, + "google.generativeai.types.GenerationConfig.response_schema": true, + "google.generativeai.types.GenerationConfig.stop_sequences": true, + 
"google.generativeai.types.GenerationConfig.temperature": true, + "google.generativeai.types.GenerationConfig.top_k": true, + "google.generativeai.types.GenerationConfig.top_p": true, + "google.generativeai.types.GenerationConfigDict": false, + "google.generativeai.types.GenerationConfigDict.__contains__": true, + "google.generativeai.types.GenerationConfigDict.__eq__": true, + "google.generativeai.types.GenerationConfigDict.__ge__": true, + "google.generativeai.types.GenerationConfigDict.__getitem__": true, + "google.generativeai.types.GenerationConfigDict.__gt__": true, + "google.generativeai.types.GenerationConfigDict.__init__": true, + "google.generativeai.types.GenerationConfigDict.__iter__": true, + "google.generativeai.types.GenerationConfigDict.__le__": true, + "google.generativeai.types.GenerationConfigDict.__len__": true, + "google.generativeai.types.GenerationConfigDict.__lt__": true, + "google.generativeai.types.GenerationConfigDict.__ne__": true, + "google.generativeai.types.GenerationConfigDict.__new__": true, + "google.generativeai.types.GenerationConfigDict.__or__": true, + "google.generativeai.types.GenerationConfigDict.__ror__": true, + "google.generativeai.types.GenerationConfigDict.clear": true, + "google.generativeai.types.GenerationConfigDict.copy": true, + "google.generativeai.types.GenerationConfigDict.fromkeys": true, + "google.generativeai.types.GenerationConfigDict.get": true, + "google.generativeai.types.GenerationConfigDict.items": true, + "google.generativeai.types.GenerationConfigDict.keys": true, + "google.generativeai.types.GenerationConfigDict.pop": true, + "google.generativeai.types.GenerationConfigDict.popitem": true, + "google.generativeai.types.GenerationConfigDict.setdefault": true, + "google.generativeai.types.GenerationConfigDict.update": true, + "google.generativeai.types.GenerationConfigDict.values": true, + "google.generativeai.types.GenerationConfigType": false, + "google.generativeai.types.HarmBlockThreshold": false, + 
"google.generativeai.types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true, + "google.generativeai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true, + "google.generativeai.types.HarmBlockThreshold.BLOCK_NONE": true, + "google.generativeai.types.HarmBlockThreshold.BLOCK_ONLY_HIGH": true, + "google.generativeai.types.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true, + "google.generativeai.types.HarmBlockThreshold.__abs__": true, + "google.generativeai.types.HarmBlockThreshold.__add__": true, + "google.generativeai.types.HarmBlockThreshold.__and__": true, + "google.generativeai.types.HarmBlockThreshold.__bool__": true, + "google.generativeai.types.HarmBlockThreshold.__contains__": true, + "google.generativeai.types.HarmBlockThreshold.__eq__": true, + "google.generativeai.types.HarmBlockThreshold.__floordiv__": true, + "google.generativeai.types.HarmBlockThreshold.__ge__": true, + "google.generativeai.types.HarmBlockThreshold.__getitem__": true, + "google.generativeai.types.HarmBlockThreshold.__gt__": true, + "google.generativeai.types.HarmBlockThreshold.__init__": true, + "google.generativeai.types.HarmBlockThreshold.__invert__": true, + "google.generativeai.types.HarmBlockThreshold.__iter__": true, + "google.generativeai.types.HarmBlockThreshold.__le__": true, + "google.generativeai.types.HarmBlockThreshold.__len__": true, + "google.generativeai.types.HarmBlockThreshold.__lshift__": true, + "google.generativeai.types.HarmBlockThreshold.__lt__": true, + "google.generativeai.types.HarmBlockThreshold.__mod__": true, + "google.generativeai.types.HarmBlockThreshold.__mul__": true, + "google.generativeai.types.HarmBlockThreshold.__ne__": true, + "google.generativeai.types.HarmBlockThreshold.__neg__": true, + "google.generativeai.types.HarmBlockThreshold.__new__": true, + "google.generativeai.types.HarmBlockThreshold.__or__": true, + "google.generativeai.types.HarmBlockThreshold.__pos__": true, + "google.generativeai.types.HarmBlockThreshold.__pow__": true, + 
"google.generativeai.types.HarmBlockThreshold.__radd__": true, + "google.generativeai.types.HarmBlockThreshold.__rand__": true, + "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": true, + "google.generativeai.types.HarmBlockThreshold.__rlshift__": true, + "google.generativeai.types.HarmBlockThreshold.__rmod__": true, + "google.generativeai.types.HarmBlockThreshold.__rmul__": true, + "google.generativeai.types.HarmBlockThreshold.__ror__": true, + "google.generativeai.types.HarmBlockThreshold.__rpow__": true, + "google.generativeai.types.HarmBlockThreshold.__rrshift__": true, + "google.generativeai.types.HarmBlockThreshold.__rshift__": true, + "google.generativeai.types.HarmBlockThreshold.__rsub__": true, + "google.generativeai.types.HarmBlockThreshold.__rtruediv__": true, + "google.generativeai.types.HarmBlockThreshold.__rxor__": true, + "google.generativeai.types.HarmBlockThreshold.__sub__": true, + "google.generativeai.types.HarmBlockThreshold.__truediv__": true, + "google.generativeai.types.HarmBlockThreshold.__xor__": true, + "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": true, + "google.generativeai.types.HarmBlockThreshold.bit_count": true, + "google.generativeai.types.HarmBlockThreshold.bit_length": true, + "google.generativeai.types.HarmBlockThreshold.conjugate": true, + "google.generativeai.types.HarmBlockThreshold.denominator": true, + "google.generativeai.types.HarmBlockThreshold.from_bytes": true, + "google.generativeai.types.HarmBlockThreshold.imag": true, + "google.generativeai.types.HarmBlockThreshold.numerator": true, + "google.generativeai.types.HarmBlockThreshold.real": true, + "google.generativeai.types.HarmBlockThreshold.to_bytes": true, + "google.generativeai.types.HarmCategory": false, + "google.generativeai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true, + "google.generativeai.types.HarmCategory.HARM_CATEGORY_HARASSMENT": true, + "google.generativeai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true, 
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true, + "google.generativeai.types.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true, + "google.generativeai.types.HarmCategory.__abs__": true, + "google.generativeai.types.HarmCategory.__add__": true, + "google.generativeai.types.HarmCategory.__and__": true, + "google.generativeai.types.HarmCategory.__bool__": true, + "google.generativeai.types.HarmCategory.__contains__": true, + "google.generativeai.types.HarmCategory.__eq__": true, + "google.generativeai.types.HarmCategory.__floordiv__": true, + "google.generativeai.types.HarmCategory.__ge__": true, + "google.generativeai.types.HarmCategory.__getitem__": true, + "google.generativeai.types.HarmCategory.__gt__": true, + "google.generativeai.types.HarmCategory.__init__": true, + "google.generativeai.types.HarmCategory.__invert__": true, + "google.generativeai.types.HarmCategory.__iter__": true, + "google.generativeai.types.HarmCategory.__le__": true, + "google.generativeai.types.HarmCategory.__len__": true, + "google.generativeai.types.HarmCategory.__lshift__": true, + "google.generativeai.types.HarmCategory.__lt__": true, + "google.generativeai.types.HarmCategory.__mod__": true, + "google.generativeai.types.HarmCategory.__mul__": true, + "google.generativeai.types.HarmCategory.__ne__": true, + "google.generativeai.types.HarmCategory.__neg__": true, + "google.generativeai.types.HarmCategory.__new__": true, + "google.generativeai.types.HarmCategory.__or__": true, + "google.generativeai.types.HarmCategory.__pos__": true, + "google.generativeai.types.HarmCategory.__pow__": true, + "google.generativeai.types.HarmCategory.__radd__": true, + "google.generativeai.types.HarmCategory.__rand__": true, + "google.generativeai.types.HarmCategory.__rfloordiv__": true, + "google.generativeai.types.HarmCategory.__rlshift__": true, + "google.generativeai.types.HarmCategory.__rmod__": true, + "google.generativeai.types.HarmCategory.__rmul__": true, + 
"google.generativeai.types.HarmCategory.__ror__": true, + "google.generativeai.types.HarmCategory.__rpow__": true, + "google.generativeai.types.HarmCategory.__rrshift__": true, + "google.generativeai.types.HarmCategory.__rshift__": true, + "google.generativeai.types.HarmCategory.__rsub__": true, + "google.generativeai.types.HarmCategory.__rtruediv__": true, + "google.generativeai.types.HarmCategory.__rxor__": true, + "google.generativeai.types.HarmCategory.__sub__": true, + "google.generativeai.types.HarmCategory.__truediv__": true, + "google.generativeai.types.HarmCategory.__xor__": true, + "google.generativeai.types.HarmCategory.as_integer_ratio": true, + "google.generativeai.types.HarmCategory.bit_count": true, + "google.generativeai.types.HarmCategory.bit_length": true, + "google.generativeai.types.HarmCategory.conjugate": true, + "google.generativeai.types.HarmCategory.denominator": true, + "google.generativeai.types.HarmCategory.from_bytes": true, + "google.generativeai.types.HarmCategory.imag": true, + "google.generativeai.types.HarmCategory.numerator": true, + "google.generativeai.types.HarmCategory.real": true, + "google.generativeai.types.HarmCategory.to_bytes": true, + "google.generativeai.types.HarmProbability": false, + "google.generativeai.types.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true, + "google.generativeai.types.HarmProbability.HIGH": true, + "google.generativeai.types.HarmProbability.LOW": true, + "google.generativeai.types.HarmProbability.MEDIUM": true, + "google.generativeai.types.HarmProbability.NEGLIGIBLE": true, + "google.generativeai.types.HarmProbability.__abs__": true, + "google.generativeai.types.HarmProbability.__add__": true, + "google.generativeai.types.HarmProbability.__and__": true, + "google.generativeai.types.HarmProbability.__bool__": true, + "google.generativeai.types.HarmProbability.__contains__": true, + "google.generativeai.types.HarmProbability.__eq__": true, + 
"google.generativeai.types.HarmProbability.__floordiv__": true, + "google.generativeai.types.HarmProbability.__ge__": true, + "google.generativeai.types.HarmProbability.__getitem__": true, + "google.generativeai.types.HarmProbability.__gt__": true, + "google.generativeai.types.HarmProbability.__init__": true, + "google.generativeai.types.HarmProbability.__invert__": true, + "google.generativeai.types.HarmProbability.__iter__": true, + "google.generativeai.types.HarmProbability.__le__": true, + "google.generativeai.types.HarmProbability.__len__": true, + "google.generativeai.types.HarmProbability.__lshift__": true, + "google.generativeai.types.HarmProbability.__lt__": true, + "google.generativeai.types.HarmProbability.__mod__": true, + "google.generativeai.types.HarmProbability.__mul__": true, + "google.generativeai.types.HarmProbability.__ne__": true, + "google.generativeai.types.HarmProbability.__neg__": true, + "google.generativeai.types.HarmProbability.__new__": true, + "google.generativeai.types.HarmProbability.__or__": true, + "google.generativeai.types.HarmProbability.__pos__": true, + "google.generativeai.types.HarmProbability.__pow__": true, + "google.generativeai.types.HarmProbability.__radd__": true, + "google.generativeai.types.HarmProbability.__rand__": true, + "google.generativeai.types.HarmProbability.__rfloordiv__": true, + "google.generativeai.types.HarmProbability.__rlshift__": true, + "google.generativeai.types.HarmProbability.__rmod__": true, + "google.generativeai.types.HarmProbability.__rmul__": true, + "google.generativeai.types.HarmProbability.__ror__": true, + "google.generativeai.types.HarmProbability.__rpow__": true, + "google.generativeai.types.HarmProbability.__rrshift__": true, + "google.generativeai.types.HarmProbability.__rshift__": true, + "google.generativeai.types.HarmProbability.__rsub__": true, + "google.generativeai.types.HarmProbability.__rtruediv__": true, + "google.generativeai.types.HarmProbability.__rxor__": true, + 
"google.generativeai.types.HarmProbability.__sub__": true, + "google.generativeai.types.HarmProbability.__truediv__": true, + "google.generativeai.types.HarmProbability.__xor__": true, + "google.generativeai.types.HarmProbability.as_integer_ratio": true, + "google.generativeai.types.HarmProbability.bit_count": true, + "google.generativeai.types.HarmProbability.bit_length": true, + "google.generativeai.types.HarmProbability.conjugate": true, + "google.generativeai.types.HarmProbability.denominator": true, + "google.generativeai.types.HarmProbability.from_bytes": true, + "google.generativeai.types.HarmProbability.imag": true, + "google.generativeai.types.HarmProbability.numerator": true, + "google.generativeai.types.HarmProbability.real": true, + "google.generativeai.types.HarmProbability.to_bytes": true, + "google.generativeai.types.IncompleteIterationError": false, + "google.generativeai.types.IncompleteIterationError.__eq__": true, + "google.generativeai.types.IncompleteIterationError.__ge__": true, + "google.generativeai.types.IncompleteIterationError.__gt__": true, + "google.generativeai.types.IncompleteIterationError.__init__": true, + "google.generativeai.types.IncompleteIterationError.__le__": true, + "google.generativeai.types.IncompleteIterationError.__lt__": true, + "google.generativeai.types.IncompleteIterationError.__ne__": true, + "google.generativeai.types.IncompleteIterationError.__new__": true, + "google.generativeai.types.IncompleteIterationError.add_note": true, + "google.generativeai.types.IncompleteIterationError.args": true, + "google.generativeai.types.IncompleteIterationError.with_traceback": true, + "google.generativeai.types.MessageDict": false, + "google.generativeai.types.MessageDict.__contains__": true, + "google.generativeai.types.MessageDict.__eq__": true, + "google.generativeai.types.MessageDict.__ge__": true, + "google.generativeai.types.MessageDict.__getitem__": true, + "google.generativeai.types.MessageDict.__gt__": true, + 
"google.generativeai.types.MessageDict.__init__": true, + "google.generativeai.types.MessageDict.__iter__": true, + "google.generativeai.types.MessageDict.__le__": true, + "google.generativeai.types.MessageDict.__len__": true, + "google.generativeai.types.MessageDict.__lt__": true, + "google.generativeai.types.MessageDict.__ne__": true, + "google.generativeai.types.MessageDict.__new__": true, + "google.generativeai.types.MessageDict.__or__": true, + "google.generativeai.types.MessageDict.__ror__": true, + "google.generativeai.types.MessageDict.clear": true, + "google.generativeai.types.MessageDict.copy": true, + "google.generativeai.types.MessageDict.fromkeys": true, + "google.generativeai.types.MessageDict.get": true, + "google.generativeai.types.MessageDict.items": true, + "google.generativeai.types.MessageDict.keys": true, + "google.generativeai.types.MessageDict.pop": true, + "google.generativeai.types.MessageDict.popitem": true, + "google.generativeai.types.MessageDict.setdefault": true, + "google.generativeai.types.MessageDict.update": true, + "google.generativeai.types.MessageDict.values": true, + "google.generativeai.types.MessageOptions": false, + "google.generativeai.types.MessagePromptDict": false, + "google.generativeai.types.MessagePromptDict.__contains__": true, + "google.generativeai.types.MessagePromptDict.__eq__": true, + "google.generativeai.types.MessagePromptDict.__ge__": true, + "google.generativeai.types.MessagePromptDict.__getitem__": true, + "google.generativeai.types.MessagePromptDict.__gt__": true, + "google.generativeai.types.MessagePromptDict.__init__": true, + "google.generativeai.types.MessagePromptDict.__iter__": true, + "google.generativeai.types.MessagePromptDict.__le__": true, + "google.generativeai.types.MessagePromptDict.__len__": true, + "google.generativeai.types.MessagePromptDict.__lt__": true, + "google.generativeai.types.MessagePromptDict.__ne__": true, + "google.generativeai.types.MessagePromptDict.__new__": true, + 
"google.generativeai.types.MessagePromptDict.__or__": true, + "google.generativeai.types.MessagePromptDict.__ror__": true, + "google.generativeai.types.MessagePromptDict.clear": true, + "google.generativeai.types.MessagePromptDict.copy": true, + "google.generativeai.types.MessagePromptDict.fromkeys": true, + "google.generativeai.types.MessagePromptDict.get": true, + "google.generativeai.types.MessagePromptDict.items": true, + "google.generativeai.types.MessagePromptDict.keys": true, + "google.generativeai.types.MessagePromptDict.pop": true, + "google.generativeai.types.MessagePromptDict.popitem": true, + "google.generativeai.types.MessagePromptDict.setdefault": true, + "google.generativeai.types.MessagePromptDict.update": true, + "google.generativeai.types.MessagePromptDict.values": true, + "google.generativeai.types.MessagePromptOptions": false, + "google.generativeai.types.MessagesOptions": false, + "google.generativeai.types.Model": false, + "google.generativeai.types.Model.__eq__": true, + "google.generativeai.types.Model.__ge__": true, + "google.generativeai.types.Model.__gt__": true, + "google.generativeai.types.Model.__init__": true, + "google.generativeai.types.Model.__le__": true, + "google.generativeai.types.Model.__lt__": true, + "google.generativeai.types.Model.__ne__": true, + "google.generativeai.types.Model.__new__": true, + "google.generativeai.types.Model.max_temperature": true, + "google.generativeai.types.Model.temperature": true, + "google.generativeai.types.Model.top_k": true, + "google.generativeai.types.Model.top_p": true, + "google.generativeai.types.ModelNameOptions": false, + "google.generativeai.types.ModelsIterable": false, + "google.generativeai.types.PartDict": false, + "google.generativeai.types.PartDict.__contains__": true, + "google.generativeai.types.PartDict.__eq__": true, + "google.generativeai.types.PartDict.__ge__": true, + "google.generativeai.types.PartDict.__getitem__": true, + "google.generativeai.types.PartDict.__gt__": 
true, + "google.generativeai.types.PartDict.__init__": true, + "google.generativeai.types.PartDict.__iter__": true, + "google.generativeai.types.PartDict.__le__": true, + "google.generativeai.types.PartDict.__len__": true, + "google.generativeai.types.PartDict.__lt__": true, + "google.generativeai.types.PartDict.__ne__": true, + "google.generativeai.types.PartDict.__new__": true, + "google.generativeai.types.PartDict.__or__": true, + "google.generativeai.types.PartDict.__ror__": true, + "google.generativeai.types.PartDict.clear": true, + "google.generativeai.types.PartDict.copy": true, + "google.generativeai.types.PartDict.fromkeys": true, + "google.generativeai.types.PartDict.get": true, + "google.generativeai.types.PartDict.items": true, + "google.generativeai.types.PartDict.keys": true, + "google.generativeai.types.PartDict.pop": true, + "google.generativeai.types.PartDict.popitem": true, + "google.generativeai.types.PartDict.setdefault": true, + "google.generativeai.types.PartDict.update": true, + "google.generativeai.types.PartDict.values": true, + "google.generativeai.types.PartType": false, + "google.generativeai.types.Permission": false, + "google.generativeai.types.Permission.__eq__": true, + "google.generativeai.types.Permission.__ge__": true, + "google.generativeai.types.Permission.__gt__": true, + "google.generativeai.types.Permission.__init__": true, + "google.generativeai.types.Permission.__le__": true, + "google.generativeai.types.Permission.__lt__": true, + "google.generativeai.types.Permission.__ne__": true, + "google.generativeai.types.Permission.__new__": true, + "google.generativeai.types.Permission.delete": true, + "google.generativeai.types.Permission.delete_async": true, + "google.generativeai.types.Permission.email_address": true, + "google.generativeai.types.Permission.get": true, + "google.generativeai.types.Permission.get_async": true, + "google.generativeai.types.Permission.to_dict": true, + "google.generativeai.types.Permission.update": 
true, + "google.generativeai.types.Permission.update_async": true, + "google.generativeai.types.Permissions": false, + "google.generativeai.types.Permissions.__eq__": true, + "google.generativeai.types.Permissions.__ge__": true, + "google.generativeai.types.Permissions.__gt__": true, + "google.generativeai.types.Permissions.__init__": true, + "google.generativeai.types.Permissions.__iter__": true, + "google.generativeai.types.Permissions.__le__": true, + "google.generativeai.types.Permissions.__lt__": true, + "google.generativeai.types.Permissions.__ne__": true, + "google.generativeai.types.Permissions.__new__": true, + "google.generativeai.types.Permissions.create": true, + "google.generativeai.types.Permissions.create_async": true, + "google.generativeai.types.Permissions.get": true, + "google.generativeai.types.Permissions.get_async": true, + "google.generativeai.types.Permissions.list": true, + "google.generativeai.types.Permissions.list_async": true, + "google.generativeai.types.Permissions.parent": true, + "google.generativeai.types.Permissions.transfer_ownership": true, + "google.generativeai.types.Permissions.transfer_ownership_async": true, + "google.generativeai.types.RequestOptions": false, + "google.generativeai.types.RequestOptions.__contains__": true, + "google.generativeai.types.RequestOptions.__eq__": true, + "google.generativeai.types.RequestOptions.__ge__": true, + "google.generativeai.types.RequestOptions.__getitem__": true, + "google.generativeai.types.RequestOptions.__gt__": true, + "google.generativeai.types.RequestOptions.__init__": true, + "google.generativeai.types.RequestOptions.__iter__": true, + "google.generativeai.types.RequestOptions.__le__": true, + "google.generativeai.types.RequestOptions.__len__": true, + "google.generativeai.types.RequestOptions.__lt__": true, + "google.generativeai.types.RequestOptions.__ne__": true, + "google.generativeai.types.RequestOptions.__new__": true, + "google.generativeai.types.RequestOptions.get": 
true, + "google.generativeai.types.RequestOptions.items": true, + "google.generativeai.types.RequestOptions.keys": true, + "google.generativeai.types.RequestOptions.values": true, + "google.generativeai.types.RequestOptionsType": false, + "google.generativeai.types.ResponseDict": false, + "google.generativeai.types.ResponseDict.__contains__": true, + "google.generativeai.types.ResponseDict.__eq__": true, + "google.generativeai.types.ResponseDict.__ge__": true, + "google.generativeai.types.ResponseDict.__getitem__": true, + "google.generativeai.types.ResponseDict.__gt__": true, + "google.generativeai.types.ResponseDict.__init__": true, + "google.generativeai.types.ResponseDict.__iter__": true, + "google.generativeai.types.ResponseDict.__le__": true, + "google.generativeai.types.ResponseDict.__len__": true, + "google.generativeai.types.ResponseDict.__lt__": true, + "google.generativeai.types.ResponseDict.__ne__": true, + "google.generativeai.types.ResponseDict.__new__": true, + "google.generativeai.types.ResponseDict.__or__": true, + "google.generativeai.types.ResponseDict.__ror__": true, + "google.generativeai.types.ResponseDict.clear": true, + "google.generativeai.types.ResponseDict.copy": true, + "google.generativeai.types.ResponseDict.fromkeys": true, + "google.generativeai.types.ResponseDict.get": true, + "google.generativeai.types.ResponseDict.items": true, + "google.generativeai.types.ResponseDict.keys": true, + "google.generativeai.types.ResponseDict.pop": true, + "google.generativeai.types.ResponseDict.popitem": true, + "google.generativeai.types.ResponseDict.setdefault": true, + "google.generativeai.types.ResponseDict.update": true, + "google.generativeai.types.ResponseDict.values": true, + "google.generativeai.types.SafetyFeedbackDict": false, + "google.generativeai.types.SafetyFeedbackDict.__contains__": true, + "google.generativeai.types.SafetyFeedbackDict.__eq__": true, + "google.generativeai.types.SafetyFeedbackDict.__ge__": true, + 
"google.generativeai.types.SafetyFeedbackDict.__getitem__": true, + "google.generativeai.types.SafetyFeedbackDict.__gt__": true, + "google.generativeai.types.SafetyFeedbackDict.__init__": true, + "google.generativeai.types.SafetyFeedbackDict.__iter__": true, + "google.generativeai.types.SafetyFeedbackDict.__le__": true, + "google.generativeai.types.SafetyFeedbackDict.__len__": true, + "google.generativeai.types.SafetyFeedbackDict.__lt__": true, + "google.generativeai.types.SafetyFeedbackDict.__ne__": true, + "google.generativeai.types.SafetyFeedbackDict.__new__": true, + "google.generativeai.types.SafetyFeedbackDict.__or__": true, + "google.generativeai.types.SafetyFeedbackDict.__ror__": true, + "google.generativeai.types.SafetyFeedbackDict.clear": true, + "google.generativeai.types.SafetyFeedbackDict.copy": true, + "google.generativeai.types.SafetyFeedbackDict.fromkeys": true, + "google.generativeai.types.SafetyFeedbackDict.get": true, + "google.generativeai.types.SafetyFeedbackDict.items": true, + "google.generativeai.types.SafetyFeedbackDict.keys": true, + "google.generativeai.types.SafetyFeedbackDict.pop": true, + "google.generativeai.types.SafetyFeedbackDict.popitem": true, + "google.generativeai.types.SafetyFeedbackDict.setdefault": true, + "google.generativeai.types.SafetyFeedbackDict.update": true, + "google.generativeai.types.SafetyFeedbackDict.values": true, + "google.generativeai.types.SafetyRatingDict": false, + "google.generativeai.types.SafetyRatingDict.__contains__": true, + "google.generativeai.types.SafetyRatingDict.__eq__": true, + "google.generativeai.types.SafetyRatingDict.__ge__": true, + "google.generativeai.types.SafetyRatingDict.__getitem__": true, + "google.generativeai.types.SafetyRatingDict.__gt__": true, + "google.generativeai.types.SafetyRatingDict.__init__": true, + "google.generativeai.types.SafetyRatingDict.__iter__": true, + "google.generativeai.types.SafetyRatingDict.__le__": true, + 
"google.generativeai.types.SafetyRatingDict.__len__": true, + "google.generativeai.types.SafetyRatingDict.__lt__": true, + "google.generativeai.types.SafetyRatingDict.__ne__": true, + "google.generativeai.types.SafetyRatingDict.__new__": true, + "google.generativeai.types.SafetyRatingDict.__or__": true, + "google.generativeai.types.SafetyRatingDict.__ror__": true, + "google.generativeai.types.SafetyRatingDict.clear": true, + "google.generativeai.types.SafetyRatingDict.copy": true, + "google.generativeai.types.SafetyRatingDict.fromkeys": true, + "google.generativeai.types.SafetyRatingDict.get": true, + "google.generativeai.types.SafetyRatingDict.items": true, + "google.generativeai.types.SafetyRatingDict.keys": true, + "google.generativeai.types.SafetyRatingDict.pop": true, + "google.generativeai.types.SafetyRatingDict.popitem": true, + "google.generativeai.types.SafetyRatingDict.setdefault": true, + "google.generativeai.types.SafetyRatingDict.update": true, + "google.generativeai.types.SafetyRatingDict.values": true, + "google.generativeai.types.SafetySettingDict": false, + "google.generativeai.types.SafetySettingDict.__contains__": true, + "google.generativeai.types.SafetySettingDict.__eq__": true, + "google.generativeai.types.SafetySettingDict.__ge__": true, + "google.generativeai.types.SafetySettingDict.__getitem__": true, + "google.generativeai.types.SafetySettingDict.__gt__": true, + "google.generativeai.types.SafetySettingDict.__init__": true, + "google.generativeai.types.SafetySettingDict.__iter__": true, + "google.generativeai.types.SafetySettingDict.__le__": true, + "google.generativeai.types.SafetySettingDict.__len__": true, + "google.generativeai.types.SafetySettingDict.__lt__": true, + "google.generativeai.types.SafetySettingDict.__ne__": true, + "google.generativeai.types.SafetySettingDict.__new__": true, + "google.generativeai.types.SafetySettingDict.__or__": true, + "google.generativeai.types.SafetySettingDict.__ror__": true, + 
"google.generativeai.types.SafetySettingDict.clear": true, + "google.generativeai.types.SafetySettingDict.copy": true, + "google.generativeai.types.SafetySettingDict.fromkeys": true, + "google.generativeai.types.SafetySettingDict.get": true, + "google.generativeai.types.SafetySettingDict.items": true, + "google.generativeai.types.SafetySettingDict.keys": true, + "google.generativeai.types.SafetySettingDict.pop": true, + "google.generativeai.types.SafetySettingDict.popitem": true, + "google.generativeai.types.SafetySettingDict.setdefault": true, + "google.generativeai.types.SafetySettingDict.update": true, + "google.generativeai.types.SafetySettingDict.values": true, + "google.generativeai.types.Status": false, + "google.generativeai.types.Status.ByteSize": true, + "google.generativeai.types.Status.Clear": true, + "google.generativeai.types.Status.ClearExtension": true, + "google.generativeai.types.Status.ClearField": true, + "google.generativeai.types.Status.CopyFrom": true, + "google.generativeai.types.Status.DESCRIPTOR": true, + "google.generativeai.types.Status.DiscardUnknownFields": true, + "google.generativeai.types.Status.Extensions": true, + "google.generativeai.types.Status.FindInitializationErrors": true, + "google.generativeai.types.Status.FromString": true, + "google.generativeai.types.Status.HasExtension": true, + "google.generativeai.types.Status.HasField": true, + "google.generativeai.types.Status.IsInitialized": true, + "google.generativeai.types.Status.ListFields": true, + "google.generativeai.types.Status.MergeFrom": true, + "google.generativeai.types.Status.MergeFromString": true, + "google.generativeai.types.Status.ParseFromString": true, + "google.generativeai.types.Status.RegisterExtension": true, + "google.generativeai.types.Status.SerializePartialToString": true, + "google.generativeai.types.Status.SerializeToString": true, + "google.generativeai.types.Status.SetInParent": true, + "google.generativeai.types.Status.UnknownFields": true, + 
"google.generativeai.types.Status.WhichOneof": true, + "google.generativeai.types.Status.__eq__": true, + "google.generativeai.types.Status.__ge__": true, + "google.generativeai.types.Status.__gt__": true, + "google.generativeai.types.Status.__init__": true, + "google.generativeai.types.Status.__le__": true, + "google.generativeai.types.Status.__lt__": true, + "google.generativeai.types.Status.__ne__": true, + "google.generativeai.types.Status.__new__": true, + "google.generativeai.types.Status.code": true, + "google.generativeai.types.Status.details": true, + "google.generativeai.types.Status.message": true, + "google.generativeai.types.StopCandidateException": false, + "google.generativeai.types.StopCandidateException.__eq__": true, + "google.generativeai.types.StopCandidateException.__ge__": true, + "google.generativeai.types.StopCandidateException.__gt__": true, + "google.generativeai.types.StopCandidateException.__init__": true, + "google.generativeai.types.StopCandidateException.__le__": true, + "google.generativeai.types.StopCandidateException.__lt__": true, + "google.generativeai.types.StopCandidateException.__ne__": true, + "google.generativeai.types.StopCandidateException.__new__": true, + "google.generativeai.types.StopCandidateException.add_note": true, + "google.generativeai.types.StopCandidateException.args": true, + "google.generativeai.types.StopCandidateException.with_traceback": true, + "google.generativeai.types.StrictContentType": false, + "google.generativeai.types.Tool": false, + "google.generativeai.types.Tool.__call__": true, + "google.generativeai.types.Tool.__eq__": true, + "google.generativeai.types.Tool.__ge__": true, + "google.generativeai.types.Tool.__getitem__": true, + "google.generativeai.types.Tool.__gt__": true, + "google.generativeai.types.Tool.__init__": true, + "google.generativeai.types.Tool.__le__": true, + "google.generativeai.types.Tool.__lt__": true, + "google.generativeai.types.Tool.__ne__": true, + 
"google.generativeai.types.Tool.__new__": true, + "google.generativeai.types.Tool.code_execution": true, + "google.generativeai.types.Tool.function_declarations": true, + "google.generativeai.types.Tool.to_proto": true, + "google.generativeai.types.ToolDict": false, + "google.generativeai.types.ToolDict.__contains__": true, + "google.generativeai.types.ToolDict.__eq__": true, + "google.generativeai.types.ToolDict.__ge__": true, + "google.generativeai.types.ToolDict.__getitem__": true, + "google.generativeai.types.ToolDict.__gt__": true, + "google.generativeai.types.ToolDict.__init__": true, + "google.generativeai.types.ToolDict.__iter__": true, + "google.generativeai.types.ToolDict.__le__": true, + "google.generativeai.types.ToolDict.__len__": true, + "google.generativeai.types.ToolDict.__lt__": true, + "google.generativeai.types.ToolDict.__ne__": true, + "google.generativeai.types.ToolDict.__new__": true, + "google.generativeai.types.ToolDict.__or__": true, + "google.generativeai.types.ToolDict.__ror__": true, + "google.generativeai.types.ToolDict.clear": true, + "google.generativeai.types.ToolDict.copy": true, + "google.generativeai.types.ToolDict.fromkeys": true, + "google.generativeai.types.ToolDict.get": true, + "google.generativeai.types.ToolDict.items": true, + "google.generativeai.types.ToolDict.keys": true, + "google.generativeai.types.ToolDict.pop": true, + "google.generativeai.types.ToolDict.popitem": true, + "google.generativeai.types.ToolDict.setdefault": true, + "google.generativeai.types.ToolDict.update": true, + "google.generativeai.types.ToolDict.values": true, + "google.generativeai.types.ToolsType": false, + "google.generativeai.types.TunedModel": false, + "google.generativeai.types.TunedModel.__eq__": true, + "google.generativeai.types.TunedModel.__ge__": true, + "google.generativeai.types.TunedModel.__gt__": true, + "google.generativeai.types.TunedModel.__init__": true, + "google.generativeai.types.TunedModel.__le__": true, + 
"google.generativeai.types.TunedModel.__lt__": true, + "google.generativeai.types.TunedModel.__ne__": true, + "google.generativeai.types.TunedModel.__new__": true, + "google.generativeai.types.TunedModel.base_model": true, + "google.generativeai.types.TunedModel.create_time": true, + "google.generativeai.types.TunedModel.description": true, + "google.generativeai.types.TunedModel.display_name": true, + "google.generativeai.types.TunedModel.name": true, + "google.generativeai.types.TunedModel.permissions": true, + "google.generativeai.types.TunedModel.source_model": true, + "google.generativeai.types.TunedModel.state": true, + "google.generativeai.types.TunedModel.temperature": true, + "google.generativeai.types.TunedModel.top_k": true, + "google.generativeai.types.TunedModel.top_p": true, + "google.generativeai.types.TunedModel.tuning_task": true, + "google.generativeai.types.TunedModel.update_time": true, + "google.generativeai.types.TunedModelNameOptions": false, + "google.generativeai.types.TunedModelState": false, + "google.generativeai.types.TunedModelState.ACTIVE": true, + "google.generativeai.types.TunedModelState.CREATING": true, + "google.generativeai.types.TunedModelState.FAILED": true, + "google.generativeai.types.TunedModelState.STATE_UNSPECIFIED": true, + "google.generativeai.types.TunedModelState.__abs__": true, + "google.generativeai.types.TunedModelState.__add__": true, + "google.generativeai.types.TunedModelState.__and__": true, + "google.generativeai.types.TunedModelState.__bool__": true, + "google.generativeai.types.TunedModelState.__contains__": true, + "google.generativeai.types.TunedModelState.__eq__": true, + "google.generativeai.types.TunedModelState.__floordiv__": true, + "google.generativeai.types.TunedModelState.__ge__": true, + "google.generativeai.types.TunedModelState.__getitem__": true, + "google.generativeai.types.TunedModelState.__gt__": true, + "google.generativeai.types.TunedModelState.__init__": true, + 
"google.generativeai.types.TunedModelState.__invert__": true, + "google.generativeai.types.TunedModelState.__iter__": true, + "google.generativeai.types.TunedModelState.__le__": true, + "google.generativeai.types.TunedModelState.__len__": true, + "google.generativeai.types.TunedModelState.__lshift__": true, + "google.generativeai.types.TunedModelState.__lt__": true, + "google.generativeai.types.TunedModelState.__mod__": true, + "google.generativeai.types.TunedModelState.__mul__": true, + "google.generativeai.types.TunedModelState.__ne__": true, + "google.generativeai.types.TunedModelState.__neg__": true, + "google.generativeai.types.TunedModelState.__new__": true, + "google.generativeai.types.TunedModelState.__or__": true, + "google.generativeai.types.TunedModelState.__pos__": true, + "google.generativeai.types.TunedModelState.__pow__": true, + "google.generativeai.types.TunedModelState.__radd__": true, + "google.generativeai.types.TunedModelState.__rand__": true, + "google.generativeai.types.TunedModelState.__rfloordiv__": true, + "google.generativeai.types.TunedModelState.__rlshift__": true, + "google.generativeai.types.TunedModelState.__rmod__": true, + "google.generativeai.types.TunedModelState.__rmul__": true, + "google.generativeai.types.TunedModelState.__ror__": true, + "google.generativeai.types.TunedModelState.__rpow__": true, + "google.generativeai.types.TunedModelState.__rrshift__": true, + "google.generativeai.types.TunedModelState.__rshift__": true, + "google.generativeai.types.TunedModelState.__rsub__": true, + "google.generativeai.types.TunedModelState.__rtruediv__": true, + "google.generativeai.types.TunedModelState.__rxor__": true, + "google.generativeai.types.TunedModelState.__sub__": true, + "google.generativeai.types.TunedModelState.__truediv__": true, + "google.generativeai.types.TunedModelState.__xor__": true, + "google.generativeai.types.TunedModelState.as_integer_ratio": true, + "google.generativeai.types.TunedModelState.bit_count": true, + 
"google.generativeai.types.TunedModelState.bit_length": true, + "google.generativeai.types.TunedModelState.conjugate": true, + "google.generativeai.types.TunedModelState.denominator": true, + "google.generativeai.types.TunedModelState.from_bytes": true, + "google.generativeai.types.TunedModelState.imag": true, + "google.generativeai.types.TunedModelState.numerator": true, + "google.generativeai.types.TunedModelState.real": true, + "google.generativeai.types.TunedModelState.to_bytes": true, + "google.generativeai.types.TypedDict": false, + "google.generativeai.types.annotations": true, + "google.generativeai.types.get_default_file_client": false, + "google.generativeai.types.to_file_data": false, + "google.generativeai.update_tuned_model": false, + "google.generativeai.upload_file": false + }, + "link_prefix": null, + "physical_path": { + "google.generativeai": "google.generativeai", + "google.generativeai.ChatSession": "google.generativeai.generative_models.ChatSession", + "google.generativeai.ChatSession.__init__": "google.generativeai.generative_models.ChatSession.__init__", + "google.generativeai.ChatSession.rewind": "google.generativeai.generative_models.ChatSession.rewind", + "google.generativeai.ChatSession.send_message": "google.generativeai.generative_models.ChatSession.send_message", + "google.generativeai.ChatSession.send_message_async": "google.generativeai.generative_models.ChatSession.send_message_async", + "google.generativeai.GenerativeModel": "google.generativeai.generative_models.GenerativeModel", + "google.generativeai.GenerativeModel.__init__": "google.generativeai.generative_models.GenerativeModel.__init__", + "google.generativeai.GenerativeModel.count_tokens": "google.generativeai.generative_models.GenerativeModel.count_tokens", + "google.generativeai.GenerativeModel.count_tokens_async": "google.generativeai.generative_models.GenerativeModel.count_tokens_async", + "google.generativeai.GenerativeModel.from_cached_content": 
"google.generativeai.generative_models.GenerativeModel.from_cached_content", + "google.generativeai.GenerativeModel.generate_content": "google.generativeai.generative_models.GenerativeModel.generate_content", + "google.generativeai.GenerativeModel.generate_content_async": "google.generativeai.generative_models.GenerativeModel.generate_content_async", + "google.generativeai.GenerativeModel.start_chat": "google.generativeai.generative_models.GenerativeModel.start_chat", + "google.generativeai.chat": "google.generativeai.discuss.chat", + "google.generativeai.chat_async": "google.generativeai.discuss.chat_async", + "google.generativeai.configure": "google.generativeai.client.configure", + "google.generativeai.count_message_tokens": "google.generativeai.discuss.count_message_tokens", + "google.generativeai.count_text_tokens": "google.generativeai.text.count_text_tokens", + "google.generativeai.create_tuned_model": "google.generativeai.models.create_tuned_model", + "google.generativeai.delete_file": "google.generativeai.files.delete_file", + "google.generativeai.delete_tuned_model": "google.generativeai.models.delete_tuned_model", + "google.generativeai.embed_content": "google.generativeai.embedding.embed_content", + "google.generativeai.embed_content_async": "google.generativeai.embedding.embed_content_async", + "google.generativeai.generate_embeddings": "google.generativeai.text.generate_embeddings", + "google.generativeai.generate_text": "google.generativeai.text.generate_text", + "google.generativeai.get_base_model": "google.generativeai.models.get_base_model", + "google.generativeai.get_file": "google.generativeai.files.get_file", + "google.generativeai.get_model": "google.generativeai.models.get_model", + "google.generativeai.get_operation": "google.generativeai.operations.get_operation", + "google.generativeai.get_tuned_model": "google.generativeai.models.get_tuned_model", + "google.generativeai.list_files": "google.generativeai.files.list_files", + 
"google.generativeai.list_models": "google.generativeai.models.list_models", + "google.generativeai.list_operations": "google.generativeai.operations.list_operations", + "google.generativeai.list_tuned_models": "google.generativeai.models.list_tuned_models", + "google.generativeai.protos": "google.generativeai.protos", + "google.generativeai.protos.AttributionSourceId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.GroundingPassageId", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": "proto.message.Message.__eq__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": "proto.message.Message.__init__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": "proto.message.Message.__ne__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": 
"google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.SemanticRetrieverChunk", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.AttributionSourceId.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.AttributionSourceId.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.AttributionSourceId.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.AttributionSourceId.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.AttributionSourceId.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.AttributionSourceId.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.AttributionSourceId.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.AttributionSourceId.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchCreateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksRequest", + 
"google.generativeai.protos.BatchCreateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchCreateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchCreateChunksRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchCreateChunksRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchCreateChunksRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchCreateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchCreateChunksRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchCreateChunksRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchCreateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksResponse", + "google.generativeai.protos.BatchCreateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchCreateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchCreateChunksResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchCreateChunksResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchCreateChunksResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchCreateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchCreateChunksResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchCreateChunksResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchDeleteChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchDeleteChunksRequest", + 
"google.generativeai.protos.BatchDeleteChunksRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchDeleteChunksRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchDeleteChunksRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchDeleteChunksRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchDeleteChunksRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchDeleteChunksRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchEmbedContentsRequest": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsRequest", + "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchEmbedContentsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchEmbedContentsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchEmbedContentsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchEmbedContentsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchEmbedContentsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchEmbedContentsResponse": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsResponse", + 
"google.generativeai.protos.BatchEmbedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchEmbedContentsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchEmbedContentsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchEmbedContentsResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchEmbedContentsResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchEmbedContentsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchEmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextRequest", + "google.generativeai.protos.BatchEmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchEmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchEmbedTextRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchEmbedTextRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchEmbedTextRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchEmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchEmbedTextRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchEmbedTextRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchEmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextResponse", + "google.generativeai.protos.BatchEmbedTextResponse.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchEmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchEmbedTextResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchEmbedTextResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchEmbedTextResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchEmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchEmbedTextResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchEmbedTextResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchUpdateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksRequest", + "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchUpdateChunksRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchUpdateChunksRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchUpdateChunksRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchUpdateChunksRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchUpdateChunksRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.BatchUpdateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksResponse", + "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from", + 
"google.generativeai.protos.BatchUpdateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.BatchUpdateChunksResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.BatchUpdateChunksResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.BatchUpdateChunksResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.BatchUpdateChunksResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.BatchUpdateChunksResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Blob": "google.ai.generativelanguage_v1beta.types.content.Blob", + "google.generativeai.protos.Blob.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Blob.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Blob.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Blob.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Blob.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Blob.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Blob.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Blob.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CachedContent": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent", + "google.generativeai.protos.CachedContent.UsageMetadata": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent.UsageMetadata", + "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize", + 
"google.generativeai.protos.CachedContent.UsageMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CachedContent.UsageMetadata.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CachedContent.UsageMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CachedContent.UsageMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CachedContent.UsageMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CachedContent.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CachedContent.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CachedContent.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CachedContent.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CachedContent.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CachedContent.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CachedContent.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CachedContent.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate", + "google.generativeai.protos.Candidate.FinishReason": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate.FinishReason", + "google.generativeai.protos.Candidate.FinishReason.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Candidate.FinishReason.__eq__": "proto.enums.Enum.__eq__", + "google.generativeai.protos.Candidate.FinishReason.__ge__": "proto.enums.Enum.__ge__", + "google.generativeai.protos.Candidate.FinishReason.__getitem__": "enum.EnumType.__getitem__", + 
"google.generativeai.protos.Candidate.FinishReason.__gt__": "proto.enums.Enum.__gt__", + "google.generativeai.protos.Candidate.FinishReason.__init__": "enum.Enum.__init__", + "google.generativeai.protos.Candidate.FinishReason.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.Candidate.FinishReason.__le__": "proto.enums.Enum.__le__", + "google.generativeai.protos.Candidate.FinishReason.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Candidate.FinishReason.__lt__": "proto.enums.Enum.__lt__", + "google.generativeai.protos.Candidate.FinishReason.__ne__": "proto.enums.Enum.__ne__", + "google.generativeai.protos.Candidate.FinishReason.__new__": "enum.Enum.__new__", + "google.generativeai.protos.Candidate.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Candidate.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Candidate.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Candidate.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Candidate.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Candidate.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Candidate.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Candidate.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Chunk": "google.ai.generativelanguage_v1beta.types.retriever.Chunk", + "google.generativeai.protos.Chunk.State": "google.ai.generativelanguage_v1beta.types.retriever.Chunk.State", + "google.generativeai.protos.Chunk.State.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Chunk.State.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.Chunk.State.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.Chunk.State.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Chunk.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Chunk.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Chunk.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Chunk.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Chunk.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Chunk.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Chunk.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Chunk.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ChunkData": "google.ai.generativelanguage_v1beta.types.retriever.ChunkData", + "google.generativeai.protos.ChunkData.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ChunkData.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ChunkData.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ChunkData.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ChunkData.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ChunkData.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ChunkData.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ChunkData.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CitationMetadata": "google.ai.generativelanguage_v1beta.types.citation.CitationMetadata", + "google.generativeai.protos.CitationMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CitationMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CitationMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CitationMetadata.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CitationMetadata.serialize": 
"proto.message.MessageMeta.serialize", + "google.generativeai.protos.CitationMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CitationMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CitationMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CitationSource": "google.ai.generativelanguage_v1beta.types.citation.CitationSource", + "google.generativeai.protos.CitationSource.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CitationSource.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CitationSource.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CitationSource.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CitationSource.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CitationSource.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CitationSource.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CitationSource.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CodeExecution": "google.ai.generativelanguage_v1beta.types.content.CodeExecution", + "google.generativeai.protos.CodeExecution.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CodeExecution.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CodeExecution.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CodeExecution.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CodeExecution.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CodeExecution.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CodeExecution.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CodeExecution.wrap": 
"proto.message.MessageMeta.wrap", + "google.generativeai.protos.CodeExecutionResult": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult", + "google.generativeai.protos.CodeExecutionResult.Outcome": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult.Outcome", + "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.CodeExecutionResult.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CodeExecutionResult.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CodeExecutionResult.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CodeExecutionResult.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CodeExecutionResult.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CodeExecutionResult.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CodeExecutionResult.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CodeExecutionResult.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Condition": "google.ai.generativelanguage_v1beta.types.retriever.Condition", + "google.generativeai.protos.Condition.Operator": "google.ai.generativelanguage_v1beta.types.retriever.Condition.Operator", + "google.generativeai.protos.Condition.Operator.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Condition.Operator.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.Condition.Operator.__iter__": "enum.EnumType.__iter__", + 
"google.generativeai.protos.Condition.Operator.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Condition.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Condition.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Condition.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Condition.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Condition.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Condition.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Condition.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Condition.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Content": "google.ai.generativelanguage_v1beta.types.content.Content", + "google.generativeai.protos.Content.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Content.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Content.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Content.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Content.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Content.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Content.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Content.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ContentEmbedding": "google.ai.generativelanguage_v1beta.types.generative_service.ContentEmbedding", + "google.generativeai.protos.ContentEmbedding.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ContentEmbedding.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ContentEmbedding.from_json": "proto.message.MessageMeta.from_json", + 
"google.generativeai.protos.ContentEmbedding.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ContentEmbedding.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ContentEmbedding.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ContentEmbedding.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ContentEmbedding.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ContentFilter": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter", + "google.generativeai.protos.ContentFilter.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ContentFilter.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ContentFilter.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ContentFilter.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ContentFilter.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ContentFilter.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ContentFilter.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ContentFilter.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Corpus": "google.ai.generativelanguage_v1beta.types.retriever.Corpus", + "google.generativeai.protos.Corpus.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Corpus.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Corpus.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Corpus.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Corpus.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Corpus.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Corpus.to_json": "proto.message.MessageMeta.to_json", 
+ "google.generativeai.protos.Corpus.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountMessageTokensRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensRequest", + "google.generativeai.protos.CountMessageTokensRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountMessageTokensRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountMessageTokensRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountMessageTokensRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountMessageTokensRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountMessageTokensRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountMessageTokensRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountMessageTokensRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountMessageTokensResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensResponse", + "google.generativeai.protos.CountMessageTokensResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountMessageTokensResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountMessageTokensResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountMessageTokensResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountMessageTokensResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountMessageTokensResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountMessageTokensResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountMessageTokensResponse.wrap": 
"proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountTextTokensRequest": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensRequest", + "google.generativeai.protos.CountTextTokensRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountTextTokensRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountTextTokensRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountTextTokensRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountTextTokensRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountTextTokensRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountTextTokensRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountTextTokensRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountTextTokensResponse": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensResponse", + "google.generativeai.protos.CountTextTokensResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountTextTokensResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountTextTokensResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountTextTokensResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountTextTokensResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountTextTokensResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountTextTokensResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountTextTokensResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountTokensRequest": 
"google.ai.generativelanguage_v1beta.types.generative_service.CountTokensRequest", + "google.generativeai.protos.CountTokensRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountTokensRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountTokensRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountTokensRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountTokensRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountTokensRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountTokensRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountTokensRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CountTokensResponse": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensResponse", + "google.generativeai.protos.CountTokensResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CountTokensResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CountTokensResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CountTokensResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CountTokensResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CountTokensResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CountTokensResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CountTokensResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.CreateCachedContentRequest", + "google.generativeai.protos.CreateCachedContentRequest.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateCachedContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateChunkRequest", + "google.generativeai.protos.CreateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateChunkRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateChunkRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateChunkRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateChunkRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateChunkRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateCorpusRequest", + "google.generativeai.protos.CreateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateCorpusRequest.deserialize": 
"proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateCorpusRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateCorpusRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateCorpusRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateCorpusRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateCorpusRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateDocumentRequest", + "google.generativeai.protos.CreateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateDocumentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateDocumentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateDocumentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateDocumentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateDocumentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileRequest", + "google.generativeai.protos.CreateFileRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateFileRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateFileRequest.from_json": "proto.message.MessageMeta.from_json", + 
"google.generativeai.protos.CreateFileRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateFileRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateFileRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateFileRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateFileRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateFileResponse": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileResponse", + "google.generativeai.protos.CreateFileResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateFileResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateFileResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateFileResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateFileResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateFileResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateFileResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateFileResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.CreatePermissionRequest", + "google.generativeai.protos.CreatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreatePermissionRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreatePermissionRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreatePermissionRequest.serialize": 
"proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreatePermissionRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreatePermissionRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateTunedModelMetadata": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelMetadata", + "google.generativeai.protos.CreateTunedModelMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateTunedModelMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateTunedModelMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateTunedModelMetadata.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateTunedModelMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CreateTunedModelMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateTunedModelMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateTunedModelMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CreateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelRequest", + "google.generativeai.protos.CreateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CreateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CreateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CreateTunedModelRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CreateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize", + 
"google.generativeai.protos.CreateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CreateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CreateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.CustomMetadata": "google.ai.generativelanguage_v1beta.types.retriever.CustomMetadata", + "google.generativeai.protos.CustomMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.CustomMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.CustomMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.CustomMetadata.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.CustomMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.CustomMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.CustomMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.CustomMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Dataset": "google.ai.generativelanguage_v1beta.types.tuned_model.Dataset", + "google.generativeai.protos.Dataset.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Dataset.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Dataset.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Dataset.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Dataset.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Dataset.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Dataset.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Dataset.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteCachedContentRequest": 
"google.ai.generativelanguage_v1beta.types.cache_service.DeleteCachedContentRequest", + "google.generativeai.protos.DeleteCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteCachedContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteCachedContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteCachedContentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteCachedContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteCachedContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteChunkRequest", + "google.generativeai.protos.DeleteChunkRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteChunkRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteChunkRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteChunkRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteChunkRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteChunkRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteChunkRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteChunkRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteCorpusRequest", + 
"google.generativeai.protos.DeleteCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteCorpusRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteCorpusRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteCorpusRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteCorpusRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteCorpusRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteDocumentRequest", + "google.generativeai.protos.DeleteDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteDocumentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteDocumentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteDocumentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteDocumentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteDocumentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.DeleteFileRequest", + "google.generativeai.protos.DeleteFileRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteFileRequest.deserialize": 
"proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteFileRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteFileRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteFileRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteFileRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteFileRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteFileRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeletePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.DeletePermissionRequest", + "google.generativeai.protos.DeletePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeletePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeletePermissionRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeletePermissionRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeletePermissionRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeletePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeletePermissionRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeletePermissionRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DeleteTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.DeleteTunedModelRequest", + "google.generativeai.protos.DeleteTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DeleteTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.DeleteTunedModelRequest.from_json": 
"proto.message.MessageMeta.from_json", + "google.generativeai.protos.DeleteTunedModelRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DeleteTunedModelRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DeleteTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DeleteTunedModelRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DeleteTunedModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Document": "google.ai.generativelanguage_v1beta.types.retriever.Document", + "google.generativeai.protos.Document.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Document.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Document.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Document.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Document.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Document.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Document.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Document.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.EmbedContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentRequest", + "google.generativeai.protos.EmbedContentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.EmbedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.EmbedContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.EmbedContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.EmbedContentRequest.serialize": "proto.message.MessageMeta.serialize", + 
"google.generativeai.protos.EmbedContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.EmbedContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.EmbedContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.EmbedContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentResponse", + "google.generativeai.protos.EmbedContentResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.EmbedContentResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.EmbedContentResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.EmbedContentResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.EmbedContentResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.EmbedContentResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.EmbedContentResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.EmbedContentResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.EmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextRequest", + "google.generativeai.protos.EmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.EmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.EmbedTextRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.EmbedTextRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.EmbedTextRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.EmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.EmbedTextRequest.to_json": 
"proto.message.MessageMeta.to_json", + "google.generativeai.protos.EmbedTextRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.EmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextResponse", + "google.generativeai.protos.EmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.EmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.EmbedTextResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.EmbedTextResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.EmbedTextResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.EmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.EmbedTextResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.EmbedTextResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Embedding": "google.ai.generativelanguage_v1beta.types.text_service.Embedding", + "google.generativeai.protos.Embedding.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Embedding.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Embedding.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Embedding.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Embedding.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Embedding.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Embedding.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Embedding.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Example": "google.ai.generativelanguage_v1beta.types.discuss_service.Example", + "google.generativeai.protos.Example.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Example.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Example.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Example.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Example.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Example.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Example.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Example.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ExecutableCode": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode", + "google.generativeai.protos.ExecutableCode.Language": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode.Language", + "google.generativeai.protos.ExecutableCode.Language.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.ExecutableCode.Language.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.ExecutableCode.Language.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.ExecutableCode.Language.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.ExecutableCode.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ExecutableCode.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ExecutableCode.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ExecutableCode.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ExecutableCode.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ExecutableCode.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ExecutableCode.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ExecutableCode.wrap": "proto.message.MessageMeta.wrap", + 
"google.generativeai.protos.File": "google.ai.generativelanguage_v1beta.types.file.File", + "google.generativeai.protos.File.State": "google.ai.generativelanguage_v1beta.types.file.File.State", + "google.generativeai.protos.File.State.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.File.State.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.File.State.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.File.State.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.File.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.File.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.File.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.File.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.File.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.File.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.File.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.File.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.FileData": "google.ai.generativelanguage_v1beta.types.content.FileData", + "google.generativeai.protos.FileData.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.FileData.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.FileData.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.FileData.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.FileData.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.FileData.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.FileData.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.FileData.wrap": "proto.message.MessageMeta.wrap", + 
"google.generativeai.protos.FunctionCall": "google.ai.generativelanguage_v1beta.types.content.FunctionCall", + "google.generativeai.protos.FunctionCall.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.FunctionCall.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.FunctionCall.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.FunctionCall.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.FunctionCall.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.FunctionCall.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.FunctionCall.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.FunctionCall.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.FunctionCallingConfig": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig", + "google.generativeai.protos.FunctionCallingConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig.Mode", + "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.FunctionCallingConfig.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.FunctionCallingConfig.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.FunctionCallingConfig.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.FunctionCallingConfig.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.FunctionCallingConfig.serialize": "proto.message.MessageMeta.serialize", + 
"google.generativeai.protos.FunctionCallingConfig.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.FunctionCallingConfig.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.FunctionCallingConfig.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.FunctionDeclaration": "google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration", + "google.generativeai.protos.FunctionDeclaration.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.FunctionDeclaration.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.FunctionDeclaration.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.FunctionDeclaration.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.FunctionDeclaration.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.FunctionDeclaration.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.FunctionDeclaration.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.FunctionDeclaration.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.FunctionResponse": "google.ai.generativelanguage_v1beta.types.content.FunctionResponse", + "google.generativeai.protos.FunctionResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.FunctionResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.FunctionResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.FunctionResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.FunctionResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.FunctionResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.FunctionResponse.to_json": "proto.message.MessageMeta.to_json", + 
"google.generativeai.protos.FunctionResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateAnswerRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest.AnswerStyle", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.GenerateAnswerRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateAnswerRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateAnswerRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateAnswerRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateAnswerRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateAnswerRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateAnswerRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateAnswerRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateAnswerResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": 
"google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback.BlockReason", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateAnswerResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateAnswerResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateAnswerResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateAnswerResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateAnswerResponse.serialize": 
"proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateAnswerResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateAnswerResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateAnswerResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentRequest", + "google.generativeai.protos.GenerateContentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateContentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback.BlockReason", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": "enum.EnumType.__contains__", + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.UsageMetadata", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": "proto.message.MessageMeta.pb", + 
"google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateContentResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateContentResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateContentResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateContentResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateContentResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateContentResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateContentResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateContentResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateMessageRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageRequest", + "google.generativeai.protos.GenerateMessageRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateMessageRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateMessageRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateMessageRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateMessageRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateMessageRequest.to_dict": 
"proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateMessageRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateMessageRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateMessageResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageResponse", + "google.generativeai.protos.GenerateMessageResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateMessageResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateMessageResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateMessageResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateMessageResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateMessageResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateMessageResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateMessageResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextRequest", + "google.generativeai.protos.GenerateTextRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateTextRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateTextRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateTextRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateTextRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateTextRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateTextRequest.to_json": 
"proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateTextRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerateTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextResponse", + "google.generativeai.protos.GenerateTextResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerateTextResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerateTextResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerateTextResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerateTextResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerateTextResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerateTextResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerateTextResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GenerationConfig": "google.ai.generativelanguage_v1beta.types.generative_service.GenerationConfig", + "google.generativeai.protos.GenerationConfig.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GenerationConfig.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GenerationConfig.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GenerationConfig.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GenerationConfig.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GenerationConfig.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GenerationConfig.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GenerationConfig.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetCachedContentRequest": 
"google.ai.generativelanguage_v1beta.types.cache_service.GetCachedContentRequest", + "google.generativeai.protos.GetCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetCachedContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetCachedContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetCachedContentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetCachedContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetCachedContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetChunkRequest", + "google.generativeai.protos.GetChunkRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetChunkRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetChunkRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetChunkRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetChunkRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetChunkRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetChunkRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetChunkRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetCorpusRequest", + "google.generativeai.protos.GetCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", + 
"google.generativeai.protos.GetCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetCorpusRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetCorpusRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetCorpusRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetCorpusRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetCorpusRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetDocumentRequest", + "google.generativeai.protos.GetDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetDocumentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetDocumentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetDocumentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetDocumentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetDocumentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.GetFileRequest", + "google.generativeai.protos.GetFileRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetFileRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetFileRequest.from_json": "proto.message.MessageMeta.from_json", + 
"google.generativeai.protos.GetFileRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetFileRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetFileRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetFileRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetFileRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetModelRequest", + "google.generativeai.protos.GetModelRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetModelRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetModelRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetModelRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetModelRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetModelRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetModelRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetPermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.GetPermissionRequest", + "google.generativeai.protos.GetPermissionRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetPermissionRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetPermissionRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetPermissionRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetPermissionRequest.serialize": "proto.message.MessageMeta.serialize", + 
"google.generativeai.protos.GetPermissionRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetPermissionRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetPermissionRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GetTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetTunedModelRequest", + "google.generativeai.protos.GetTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GetTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GetTunedModelRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GetTunedModelRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GetTunedModelRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GetTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GetTunedModelRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GetTunedModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingAttribution": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingAttribution", + "google.generativeai.protos.GroundingAttribution.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingAttribution.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingAttribution.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingAttribution.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingAttribution.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingAttribution.to_dict": "proto.message.MessageMeta.to_dict", + 
"google.generativeai.protos.GroundingAttribution.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingAttribution.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingPassage": "google.ai.generativelanguage_v1beta.types.content.GroundingPassage", + "google.generativeai.protos.GroundingPassage.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingPassage.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingPassage.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingPassage.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingPassage.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingPassage.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingPassage.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingPassage.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingPassages": "google.ai.generativelanguage_v1beta.types.content.GroundingPassages", + "google.generativeai.protos.GroundingPassages.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingPassages.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingPassages.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingPassages.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingPassages.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingPassages.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingPassages.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingPassages.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.HarmCategory": 
"google.ai.generativelanguage_v1beta.types.safety.HarmCategory", + "google.generativeai.protos.HarmCategory.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.HarmCategory.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.HarmCategory.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.HarmCategory.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Hyperparameters": "google.ai.generativelanguage_v1beta.types.tuned_model.Hyperparameters", + "google.generativeai.protos.Hyperparameters.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Hyperparameters.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Hyperparameters.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Hyperparameters.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Hyperparameters.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Hyperparameters.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Hyperparameters.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Hyperparameters.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListCachedContentsRequest": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsRequest", + "google.generativeai.protos.ListCachedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListCachedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListCachedContentsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListCachedContentsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListCachedContentsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListCachedContentsRequest.to_dict": 
"proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListCachedContentsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListCachedContentsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListCachedContentsResponse": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsResponse", + "google.generativeai.protos.ListCachedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListCachedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListCachedContentsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListCachedContentsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListCachedContentsResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListCachedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListCachedContentsResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListCachedContentsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksRequest", + "google.generativeai.protos.ListChunksRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListChunksRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListChunksRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListChunksRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListChunksRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListChunksRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListChunksRequest.to_json": 
"proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListChunksRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksResponse", + "google.generativeai.protos.ListChunksResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListChunksResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListChunksResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListChunksResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListChunksResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListChunksResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListChunksResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListChunksResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListCorporaRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaRequest", + "google.generativeai.protos.ListCorporaRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListCorporaRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListCorporaRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListCorporaRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListCorporaRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListCorporaRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListCorporaRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListCorporaRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListCorporaResponse": 
"google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaResponse", + "google.generativeai.protos.ListCorporaResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListCorporaResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListCorporaResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListCorporaResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListCorporaResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListCorporaResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListCorporaResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListCorporaResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListDocumentsRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsRequest", + "google.generativeai.protos.ListDocumentsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListDocumentsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListDocumentsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListDocumentsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListDocumentsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListDocumentsRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListDocumentsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListDocumentsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListDocumentsResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsResponse", + "google.generativeai.protos.ListDocumentsResponse.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListDocumentsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListDocumentsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListDocumentsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListDocumentsResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListDocumentsResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListDocumentsResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListDocumentsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListFilesRequest": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesRequest", + "google.generativeai.protos.ListFilesRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListFilesRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListFilesRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListFilesRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListFilesRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListFilesRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListFilesRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListFilesRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListFilesResponse": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesResponse", + "google.generativeai.protos.ListFilesResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListFilesResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListFilesResponse.from_json": 
"proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListFilesResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListFilesResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListFilesResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListFilesResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListFilesResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsRequest", + "google.generativeai.protos.ListModelsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListModelsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListModelsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListModelsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListModelsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListModelsRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListModelsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListModelsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsResponse", + "google.generativeai.protos.ListModelsResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListModelsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListModelsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListModelsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListModelsResponse.serialize": 
"proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListModelsResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListModelsResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListModelsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListPermissionsRequest": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsRequest", + "google.generativeai.protos.ListPermissionsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListPermissionsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListPermissionsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListPermissionsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListPermissionsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListPermissionsRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListPermissionsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListPermissionsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListPermissionsResponse": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsResponse", + "google.generativeai.protos.ListPermissionsResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListPermissionsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListPermissionsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListPermissionsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListPermissionsResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListPermissionsResponse.to_dict": 
"proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListPermissionsResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListPermissionsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListTunedModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsRequest", + "google.generativeai.protos.ListTunedModelsRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListTunedModelsRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListTunedModelsRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListTunedModelsRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListTunedModelsRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListTunedModelsRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListTunedModelsRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListTunedModelsRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ListTunedModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsResponse", + "google.generativeai.protos.ListTunedModelsResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ListTunedModelsResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ListTunedModelsResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ListTunedModelsResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.ListTunedModelsResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ListTunedModelsResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ListTunedModelsResponse.to_json": 
"proto.message.MessageMeta.to_json", + "google.generativeai.protos.ListTunedModelsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Message": "google.ai.generativelanguage_v1beta.types.discuss_service.Message", + "google.generativeai.protos.Message.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Message.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Message.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Message.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Message.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Message.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Message.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Message.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.MessagePrompt": "google.ai.generativelanguage_v1beta.types.discuss_service.MessagePrompt", + "google.generativeai.protos.MessagePrompt.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.MessagePrompt.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.MessagePrompt.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.MessagePrompt.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.MessagePrompt.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.MessagePrompt.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.MessagePrompt.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.MessagePrompt.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.MetadataFilter": "google.ai.generativelanguage_v1beta.types.retriever.MetadataFilter", + "google.generativeai.protos.MetadataFilter.copy_from": "proto.message.MessageMeta.copy_from", + 
"google.generativeai.protos.MetadataFilter.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.MetadataFilter.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.MetadataFilter.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.MetadataFilter.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.MetadataFilter.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.MetadataFilter.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.MetadataFilter.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Model": "google.ai.generativelanguage_v1beta.types.model.Model", + "google.generativeai.protos.Model.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Model.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Model.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Model.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Model.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Model.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Model.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Model.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Part": "google.ai.generativelanguage_v1beta.types.content.Part", + "google.generativeai.protos.Part.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Part.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Part.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Part.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Part.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Part.to_dict": 
"proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Part.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Part.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Permission": "google.ai.generativelanguage_v1beta.types.permission.Permission", + "google.generativeai.protos.Permission.GranteeType": "google.ai.generativelanguage_v1beta.types.permission.Permission.GranteeType", + "google.generativeai.protos.Permission.GranteeType.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Permission.GranteeType.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.Permission.GranteeType.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.Permission.GranteeType.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Permission.Role": "google.ai.generativelanguage_v1beta.types.permission.Permission.Role", + "google.generativeai.protos.Permission.Role.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Permission.Role.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.Permission.Role.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.Permission.Role.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.Permission.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Permission.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Permission.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Permission.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Permission.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Permission.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Permission.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Permission.wrap": "proto.message.MessageMeta.wrap", + 
"google.generativeai.protos.QueryCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusRequest", + "google.generativeai.protos.QueryCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.QueryCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.QueryCorpusRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.QueryCorpusRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.QueryCorpusRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.QueryCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.QueryCorpusRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.QueryCorpusRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.QueryCorpusResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusResponse", + "google.generativeai.protos.QueryCorpusResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.QueryCorpusResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.QueryCorpusResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.QueryCorpusResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.QueryCorpusResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.QueryCorpusResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.QueryCorpusResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.QueryCorpusResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.QueryDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentRequest", + 
"google.generativeai.protos.QueryDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.QueryDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.QueryDocumentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.QueryDocumentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.QueryDocumentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.QueryDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.QueryDocumentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.QueryDocumentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.QueryDocumentResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentResponse", + "google.generativeai.protos.QueryDocumentResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.QueryDocumentResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.QueryDocumentResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.QueryDocumentResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.QueryDocumentResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.QueryDocumentResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.QueryDocumentResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.QueryDocumentResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.RelevantChunk": "google.ai.generativelanguage_v1beta.types.retriever_service.RelevantChunk", + "google.generativeai.protos.RelevantChunk.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.RelevantChunk.deserialize": 
"proto.message.MessageMeta.deserialize", + "google.generativeai.protos.RelevantChunk.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.RelevantChunk.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.RelevantChunk.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.RelevantChunk.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.RelevantChunk.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.RelevantChunk.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.SafetyFeedback": "google.ai.generativelanguage_v1beta.types.safety.SafetyFeedback", + "google.generativeai.protos.SafetyFeedback.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.SafetyFeedback.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.SafetyFeedback.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.SafetyFeedback.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.SafetyFeedback.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.SafetyFeedback.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.SafetyFeedback.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.SafetyFeedback.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.SafetyRating": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating", + "google.generativeai.protos.SafetyRating.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.SafetyRating.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.SafetyRating.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.SafetyRating.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.SafetyRating.serialize": 
"proto.message.MessageMeta.serialize", + "google.generativeai.protos.SafetyRating.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.SafetyRating.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.SafetyRating.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.SafetySetting": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting", + "google.generativeai.protos.SafetySetting.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.SafetySetting.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.SafetySetting.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.SafetySetting.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.SafetySetting.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.SafetySetting.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.SafetySetting.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.SafetySetting.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Schema": "google.ai.generativelanguage_v1beta.types.content.Schema", + "google.generativeai.protos.Schema.PropertiesEntry": "google.ai.generativelanguage_v1beta.types.content.Schema.PropertiesEntry", + "google.generativeai.protos.Schema.PropertiesEntry.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Schema.PropertiesEntry.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Schema.PropertiesEntry.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Schema.PropertiesEntry.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Schema.PropertiesEntry.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Schema.PropertiesEntry.to_dict": "proto.message.MessageMeta.to_dict", + 
"google.generativeai.protos.Schema.PropertiesEntry.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Schema.PropertiesEntry.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Schema.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Schema.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Schema.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Schema.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Schema.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Schema.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Schema.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Schema.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.SemanticRetrieverConfig": "google.ai.generativelanguage_v1beta.types.generative_service.SemanticRetrieverConfig", + "google.generativeai.protos.SemanticRetrieverConfig.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.SemanticRetrieverConfig.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.SemanticRetrieverConfig.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.SemanticRetrieverConfig.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.SemanticRetrieverConfig.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.SemanticRetrieverConfig.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.SemanticRetrieverConfig.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.SemanticRetrieverConfig.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.StringList": "google.ai.generativelanguage_v1beta.types.retriever.StringList", + "google.generativeai.protos.StringList.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.StringList.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.StringList.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.StringList.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.StringList.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.StringList.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.StringList.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.StringList.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TaskType": "google.ai.generativelanguage_v1beta.types.generative_service.TaskType", + "google.generativeai.protos.TaskType.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.TaskType.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.TaskType.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.TaskType.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.TextCompletion": "google.ai.generativelanguage_v1beta.types.text_service.TextCompletion", + "google.generativeai.protos.TextCompletion.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TextCompletion.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TextCompletion.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TextCompletion.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TextCompletion.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TextCompletion.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TextCompletion.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TextCompletion.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TextPrompt": 
"google.ai.generativelanguage_v1beta.types.text_service.TextPrompt", + "google.generativeai.protos.TextPrompt.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TextPrompt.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TextPrompt.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TextPrompt.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TextPrompt.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TextPrompt.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TextPrompt.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TextPrompt.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Tool": "google.ai.generativelanguage_v1beta.types.content.Tool", + "google.generativeai.protos.Tool.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Tool.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Tool.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Tool.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Tool.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Tool.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Tool.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Tool.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.ToolConfig": "google.ai.generativelanguage_v1beta.types.content.ToolConfig", + "google.generativeai.protos.ToolConfig.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.ToolConfig.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.ToolConfig.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.ToolConfig.pb": "proto.message.MessageMeta.pb", + 
"google.generativeai.protos.ToolConfig.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.ToolConfig.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.ToolConfig.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.ToolConfig.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TransferOwnershipRequest": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipRequest", + "google.generativeai.protos.TransferOwnershipRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TransferOwnershipRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TransferOwnershipRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TransferOwnershipRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TransferOwnershipRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TransferOwnershipRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TransferOwnershipRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TransferOwnershipRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TransferOwnershipResponse": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipResponse", + "google.generativeai.protos.TransferOwnershipResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TransferOwnershipResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TransferOwnershipResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TransferOwnershipResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TransferOwnershipResponse.serialize": "proto.message.MessageMeta.serialize", + 
"google.generativeai.protos.TransferOwnershipResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TransferOwnershipResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TransferOwnershipResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TunedModel": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel", + "google.generativeai.protos.TunedModel.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TunedModel.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TunedModel.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TunedModel.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TunedModel.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TunedModel.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TunedModel.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TunedModel.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TunedModelSource": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModelSource", + "google.generativeai.protos.TunedModelSource.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TunedModelSource.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TunedModelSource.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TunedModelSource.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TunedModelSource.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TunedModelSource.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TunedModelSource.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TunedModelSource.wrap": "proto.message.MessageMeta.wrap", 
+ "google.generativeai.protos.TuningExample": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExample", + "google.generativeai.protos.TuningExample.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TuningExample.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TuningExample.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TuningExample.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TuningExample.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TuningExample.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TuningExample.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TuningExample.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TuningExamples": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExamples", + "google.generativeai.protos.TuningExamples.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TuningExamples.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TuningExamples.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TuningExamples.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TuningExamples.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TuningExamples.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TuningExamples.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TuningExamples.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TuningSnapshot": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningSnapshot", + "google.generativeai.protos.TuningSnapshot.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TuningSnapshot.deserialize": 
"proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TuningSnapshot.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TuningSnapshot.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TuningSnapshot.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TuningSnapshot.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TuningSnapshot.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TuningSnapshot.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.TuningTask": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningTask", + "google.generativeai.protos.TuningTask.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.TuningTask.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.TuningTask.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.TuningTask.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.TuningTask.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.TuningTask.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.TuningTask.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.TuningTask.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Type": "google.ai.generativelanguage_v1beta.types.content.Type", + "google.generativeai.protos.Type.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.Type.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.Type.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.Type.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.UpdateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.UpdateCachedContentRequest", + 
"google.generativeai.protos.UpdateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.UpdateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.UpdateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdateCachedContentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.UpdateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateChunkRequest", + "google.generativeai.protos.UpdateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.UpdateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.UpdateChunkRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdateChunkRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdateChunkRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdateChunkRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdateChunkRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.UpdateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateCorpusRequest", + "google.generativeai.protos.UpdateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", + 
"google.generativeai.protos.UpdateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.UpdateCorpusRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdateCorpusRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdateCorpusRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdateCorpusRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdateCorpusRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.UpdateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateDocumentRequest", + "google.generativeai.protos.UpdateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.UpdateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.UpdateDocumentRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdateDocumentRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdateDocumentRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdateDocumentRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdateDocumentRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.UpdatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.UpdatePermissionRequest", + "google.generativeai.protos.UpdatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.UpdatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize", + 
"google.generativeai.protos.UpdatePermissionRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdatePermissionRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdatePermissionRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdatePermissionRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdatePermissionRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.UpdateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.UpdateTunedModelRequest", + "google.generativeai.protos.UpdateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.UpdateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.UpdateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.UpdateTunedModelRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.UpdateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.UpdateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.UpdateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.UpdateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.VideoMetadata": "google.ai.generativelanguage_v1beta.types.file.VideoMetadata", + "google.generativeai.protos.VideoMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.VideoMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.VideoMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.VideoMetadata.pb": 
"proto.message.MessageMeta.pb", + "google.generativeai.protos.VideoMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.VideoMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.VideoMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.VideoMetadata.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.types": "google.generativeai.types", + "google.generativeai.types.AsyncGenerateContentResponse": "google.generativeai.types.generation_types.AsyncGenerateContentResponse", + "google.generativeai.types.AsyncGenerateContentResponse.__init__": "google.generativeai.types.generation_types.BaseGenerateContentResponse.__init__", + "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_aiterator", + "google.generativeai.types.AsyncGenerateContentResponse.from_response": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_response", + "google.generativeai.types.AsyncGenerateContentResponse.resolve": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.resolve", + "google.generativeai.types.AsyncGenerateContentResponse.to_dict": "google.generativeai.types.generation_types.BaseGenerateContentResponse.to_dict", + "google.generativeai.types.AuthorError": "google.generativeai.types.discuss_types.AuthorError", + "google.generativeai.types.BlobDict": "google.generativeai.types.content_types.BlobDict", + "google.generativeai.types.BlockedPromptException": "google.generativeai.types.generation_types.BlockedPromptException", + "google.generativeai.types.BlockedReason": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter.BlockedReason", + "google.generativeai.types.BlockedReason.__contains__": "enum.EnumType.__contains__", + "google.generativeai.types.BlockedReason.__getitem__": "enum.EnumType.__getitem__", + 
"google.generativeai.types.BlockedReason.__iter__": "enum.EnumType.__iter__", + "google.generativeai.types.BlockedReason.__len__": "enum.EnumType.__len__", + "google.generativeai.types.BrokenResponseError": "google.generativeai.types.generation_types.BrokenResponseError", + "google.generativeai.types.CallableFunctionDeclaration": "google.generativeai.types.content_types.CallableFunctionDeclaration", + "google.generativeai.types.CallableFunctionDeclaration.__call__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__call__", + "google.generativeai.types.CallableFunctionDeclaration.__init__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__init__", + "google.generativeai.types.CallableFunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto", + "google.generativeai.types.ChatResponse": "google.generativeai.types.discuss_types.ChatResponse", + "google.generativeai.types.ChatResponse.__eq__": "google.generativeai.types.discuss_types.ChatResponse.__eq__", + "google.generativeai.types.ChatResponse.reply": "google.generativeai.types.discuss_types.ChatResponse.reply", + "google.generativeai.types.ChatResponse.to_dict": "google.generativeai.types.discuss_types.ChatResponse.to_dict", + "google.generativeai.types.CitationMetadataDict": "google.generativeai.types.citation_types.CitationMetadataDict", + "google.generativeai.types.CitationSourceDict": "google.generativeai.types.citation_types.CitationSourceDict", + "google.generativeai.types.Completion": "google.generativeai.types.text_types.Completion", + "google.generativeai.types.Completion.__eq__": "google.generativeai.types.text_types.Completion.__eq__", + "google.generativeai.types.Completion.to_dict": "google.generativeai.types.text_types.Completion.to_dict", + "google.generativeai.types.ContentDict": "google.generativeai.types.content_types.ContentDict", + "google.generativeai.types.ContentFilterDict": 
"google.generativeai.types.safety_types.ContentFilterDict", + "google.generativeai.types.ExampleDict": "google.generativeai.types.discuss_types.ExampleDict", + "google.generativeai.types.File": "google.generativeai.types.file_types.File", + "google.generativeai.types.File.__init__": "google.generativeai.types.file_types.File.__init__", + "google.generativeai.types.File.delete": "google.generativeai.types.file_types.File.delete", + "google.generativeai.types.File.to_dict": "google.generativeai.types.file_types.File.to_dict", + "google.generativeai.types.File.to_proto": "google.generativeai.types.file_types.File.to_proto", + "google.generativeai.types.FileDataDict": "google.generativeai.types.file_types.FileDataDict", + "google.generativeai.types.FunctionDeclaration": "google.generativeai.types.content_types.FunctionDeclaration", + "google.generativeai.types.FunctionDeclaration.__init__": "google.generativeai.types.content_types.FunctionDeclaration.__init__", + "google.generativeai.types.FunctionDeclaration.from_function": "google.generativeai.types.content_types.FunctionDeclaration.from_function", + "google.generativeai.types.FunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto", + "google.generativeai.types.FunctionDeclaration.to_proto": "google.generativeai.types.content_types.FunctionDeclaration.to_proto", + "google.generativeai.types.FunctionLibrary": "google.generativeai.types.content_types.FunctionLibrary", + "google.generativeai.types.FunctionLibrary.__call__": "google.generativeai.types.content_types.FunctionLibrary.__call__", + "google.generativeai.types.FunctionLibrary.__getitem__": "google.generativeai.types.content_types.FunctionLibrary.__getitem__", + "google.generativeai.types.FunctionLibrary.__init__": "google.generativeai.types.content_types.FunctionLibrary.__init__", + "google.generativeai.types.FunctionLibrary.to_proto": "google.generativeai.types.content_types.FunctionLibrary.to_proto", + 
"google.generativeai.types.GenerateContentResponse": "google.generativeai.types.generation_types.GenerateContentResponse", + "google.generativeai.types.GenerateContentResponse.__iter__": "google.generativeai.types.generation_types.GenerateContentResponse.__iter__", + "google.generativeai.types.GenerateContentResponse.from_iterator": "google.generativeai.types.generation_types.GenerateContentResponse.from_iterator", + "google.generativeai.types.GenerateContentResponse.from_response": "google.generativeai.types.generation_types.GenerateContentResponse.from_response", + "google.generativeai.types.GenerateContentResponse.resolve": "google.generativeai.types.generation_types.GenerateContentResponse.resolve", + "google.generativeai.types.GenerationConfig": "google.generativeai.types.generation_types.GenerationConfig", + "google.generativeai.types.GenerationConfig.__eq__": "google.generativeai.types.generation_types.GenerationConfig.__eq__", + "google.generativeai.types.GenerationConfig.__init__": "google.generativeai.types.generation_types.GenerationConfig.__init__", + "google.generativeai.types.GenerationConfigDict": "google.generativeai.types.generation_types.GenerationConfigDict", + "google.generativeai.types.HarmBlockThreshold": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting.HarmBlockThreshold", + "google.generativeai.types.HarmBlockThreshold.__contains__": "enum.EnumType.__contains__", + "google.generativeai.types.HarmBlockThreshold.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.types.HarmBlockThreshold.__iter__": "enum.EnumType.__iter__", + "google.generativeai.types.HarmBlockThreshold.__len__": "enum.EnumType.__len__", + "google.generativeai.types.HarmCategory": "google.generativeai.types.safety_types.HarmCategory", + "google.generativeai.types.HarmCategory.__contains__": "enum.EnumType.__contains__", + "google.generativeai.types.HarmCategory.__getitem__": "enum.EnumType.__getitem__", + 
"google.generativeai.types.HarmCategory.__iter__": "enum.EnumType.__iter__", + "google.generativeai.types.HarmCategory.__len__": "enum.EnumType.__len__", + "google.generativeai.types.HarmProbability": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating.HarmProbability", + "google.generativeai.types.HarmProbability.__contains__": "enum.EnumType.__contains__", + "google.generativeai.types.HarmProbability.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.types.HarmProbability.__iter__": "enum.EnumType.__iter__", + "google.generativeai.types.HarmProbability.__len__": "enum.EnumType.__len__", + "google.generativeai.types.IncompleteIterationError": "google.generativeai.types.generation_types.IncompleteIterationError", + "google.generativeai.types.MessageDict": "google.generativeai.types.discuss_types.MessageDict", + "google.generativeai.types.MessagePromptDict": "google.generativeai.types.discuss_types.MessagePromptDict", + "google.generativeai.types.Model": "google.generativeai.types.model_types.Model", + "google.generativeai.types.Model.__eq__": "google.generativeai.types.model_types.Model.__eq__", + "google.generativeai.types.Model.__init__": "google.generativeai.types.model_types.Model.__init__", + "google.generativeai.types.PartDict": "google.generativeai.types.content_types.PartDict", + "google.generativeai.types.Permission": "google.generativeai.types.permission_types.Permission", + "google.generativeai.types.Permission.__eq__": "google.generativeai.types.permission_types.Permission.__eq__", + "google.generativeai.types.Permission.__init__": "google.generativeai.types.permission_types.Permission.__init__", + "google.generativeai.types.Permission.delete": "google.generativeai.types.permission_types.Permission.delete", + "google.generativeai.types.Permission.delete_async": "google.generativeai.types.permission_types.Permission.delete_async", + "google.generativeai.types.Permission.get": 
"google.generativeai.types.permission_types.Permission.get", + "google.generativeai.types.Permission.get_async": "google.generativeai.types.permission_types.Permission.get_async", + "google.generativeai.types.Permission.to_dict": "google.generativeai.types.permission_types.Permission.to_dict", + "google.generativeai.types.Permission.update": "google.generativeai.types.permission_types.Permission.update", + "google.generativeai.types.Permission.update_async": "google.generativeai.types.permission_types.Permission.update_async", + "google.generativeai.types.Permissions": "google.generativeai.types.permission_types.Permissions", + "google.generativeai.types.Permissions.__init__": "google.generativeai.types.permission_types.Permissions.__init__", + "google.generativeai.types.Permissions.__iter__": "google.generativeai.types.permission_types.Permissions.__iter__", + "google.generativeai.types.Permissions.create": "google.generativeai.types.permission_types.Permissions.create", + "google.generativeai.types.Permissions.create_async": "google.generativeai.types.permission_types.Permissions.create_async", + "google.generativeai.types.Permissions.get": "google.generativeai.types.permission_types.Permissions.get", + "google.generativeai.types.Permissions.get_async": "google.generativeai.types.permission_types.Permissions.get_async", + "google.generativeai.types.Permissions.list": "google.generativeai.types.permission_types.Permissions.list", + "google.generativeai.types.Permissions.list_async": "google.generativeai.types.permission_types.Permissions.list_async", + "google.generativeai.types.Permissions.transfer_ownership": "google.generativeai.types.permission_types.Permissions.transfer_ownership", + "google.generativeai.types.Permissions.transfer_ownership_async": "google.generativeai.types.permission_types.Permissions.transfer_ownership_async", + "google.generativeai.types.RequestOptions": "google.generativeai.types.helper_types.RequestOptions", + 
"google.generativeai.types.RequestOptions.__contains__": "collections.abc.Mapping.__contains__", + "google.generativeai.types.RequestOptions.__eq__": "google.generativeai.types.helper_types.RequestOptions.__eq__", + "google.generativeai.types.RequestOptions.__getitem__": "google.generativeai.types.helper_types.RequestOptions.__getitem__", + "google.generativeai.types.RequestOptions.__init__": "google.generativeai.types.helper_types.RequestOptions.__init__", + "google.generativeai.types.RequestOptions.__iter__": "google.generativeai.types.helper_types.RequestOptions.__iter__", + "google.generativeai.types.RequestOptions.__len__": "google.generativeai.types.helper_types.RequestOptions.__len__", + "google.generativeai.types.RequestOptions.get": "collections.abc.Mapping.get", + "google.generativeai.types.RequestOptions.items": "collections.abc.Mapping.items", + "google.generativeai.types.RequestOptions.keys": "collections.abc.Mapping.keys", + "google.generativeai.types.RequestOptions.values": "collections.abc.Mapping.values", + "google.generativeai.types.ResponseDict": "google.generativeai.types.discuss_types.ResponseDict", + "google.generativeai.types.SafetyFeedbackDict": "google.generativeai.types.safety_types.SafetyFeedbackDict", + "google.generativeai.types.SafetyRatingDict": "google.generativeai.types.safety_types.SafetyRatingDict", + "google.generativeai.types.SafetySettingDict": "google.generativeai.types.safety_types.SafetySettingDict", + "google.generativeai.types.Status": "google.rpc.status_pb2.Status", + "google.generativeai.types.Status.RegisterExtension": "google.protobuf.message.Message.RegisterExtension", + "google.generativeai.types.StopCandidateException": "google.generativeai.types.generation_types.StopCandidateException", + "google.generativeai.types.Tool": "google.generativeai.types.content_types.Tool", + "google.generativeai.types.Tool.__call__": "google.generativeai.types.content_types.Tool.__call__", + 
"google.generativeai.types.Tool.__getitem__": "google.generativeai.types.content_types.Tool.__getitem__", + "google.generativeai.types.Tool.__init__": "google.generativeai.types.content_types.Tool.__init__", + "google.generativeai.types.Tool.to_proto": "google.generativeai.types.content_types.Tool.to_proto", + "google.generativeai.types.ToolDict": "google.generativeai.types.content_types.ToolDict", + "google.generativeai.types.TunedModel": "google.generativeai.types.model_types.TunedModel", + "google.generativeai.types.TunedModel.__eq__": "google.generativeai.types.model_types.TunedModel.__eq__", + "google.generativeai.types.TunedModel.__init__": "google.generativeai.types.model_types.TunedModel.__init__", + "google.generativeai.types.TunedModelState": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel.State", + "google.generativeai.types.TunedModelState.__contains__": "enum.EnumType.__contains__", + "google.generativeai.types.TunedModelState.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.types.TunedModelState.__iter__": "enum.EnumType.__iter__", + "google.generativeai.types.TunedModelState.__len__": "enum.EnumType.__len__", + "google.generativeai.types.TypedDict": "typing_extensions.TypedDict", + "google.generativeai.types.get_default_file_client": "google.generativeai.client.get_default_file_client", + "google.generativeai.types.to_file_data": "google.generativeai.types.file_types.to_file_data", + "google.generativeai.update_tuned_model": "google.generativeai.models.update_tuned_model", + "google.generativeai.upload_file": "google.generativeai.files.upload_file" + }, + "py_module_names": { + "google.generativeai": "google.generativeai" + } +} diff --git a/docs/api/google/generativeai/_redirects.yaml b/docs/api/google/generativeai/_redirects.yaml new file mode 100644 index 000000000..cea696430 --- /dev/null +++ b/docs/api/google/generativeai/_redirects.yaml @@ -0,0 +1,13 @@ +redirects: +- from: 
/api/python/google/generativeai/GenerationConfig + to: /api/python/google/generativeai/types/GenerationConfig +- from: /api/python/google/generativeai/protos/ContentFilter/BlockedReason + to: /api/python/google/generativeai/types/BlockedReason +- from: /api/python/google/generativeai/protos/SafetyRating/HarmProbability + to: /api/python/google/generativeai/types/HarmProbability +- from: /api/python/google/generativeai/protos/SafetySetting/HarmBlockThreshold + to: /api/python/google/generativeai/types/HarmBlockThreshold +- from: /api/python/google/generativeai/protos/TunedModel/State + to: /api/python/google/generativeai/types/TunedModelState +- from: /api/python/google/generativeai/types/ModelNameOptions + to: /api/python/google/generativeai/types/AnyModelNameOptions diff --git a/docs/api/google/generativeai/_toc.yaml b/docs/api/google/generativeai/_toc.yaml new file mode 100644 index 000000000..99797d5d8 --- /dev/null +++ b/docs/api/google/generativeai/_toc.yaml @@ -0,0 +1,507 @@ +toc: +- title: google.generativeai + section: + - title: Overview + path: /api/python/google/generativeai + - title: ChatSession + path: /api/python/google/generativeai/ChatSession + - title: GenerativeModel + path: /api/python/google/generativeai/GenerativeModel + - title: chat + path: /api/python/google/generativeai/chat + - title: chat_async + path: /api/python/google/generativeai/chat_async + - title: configure + path: /api/python/google/generativeai/configure + - title: count_message_tokens + path: /api/python/google/generativeai/count_message_tokens + - title: count_text_tokens + path: /api/python/google/generativeai/count_text_tokens + - title: create_tuned_model + path: /api/python/google/generativeai/create_tuned_model + - title: delete_file + path: /api/python/google/generativeai/delete_file + - title: delete_tuned_model + path: /api/python/google/generativeai/delete_tuned_model + - title: embed_content + path: /api/python/google/generativeai/embed_content + - title: 
embed_content_async + path: /api/python/google/generativeai/embed_content_async + - title: generate_embeddings + path: /api/python/google/generativeai/generate_embeddings + - title: generate_text + path: /api/python/google/generativeai/generate_text + - title: get_base_model + path: /api/python/google/generativeai/get_base_model + - title: get_file + path: /api/python/google/generativeai/get_file + - title: get_model + path: /api/python/google/generativeai/get_model + - title: get_operation + path: /api/python/google/generativeai/get_operation + - title: get_tuned_model + path: /api/python/google/generativeai/get_tuned_model + - title: list_files + path: /api/python/google/generativeai/list_files + - title: list_models + path: /api/python/google/generativeai/list_models + - title: list_operations + path: /api/python/google/generativeai/list_operations + - title: list_tuned_models + path: /api/python/google/generativeai/list_tuned_models + - title: update_tuned_model + path: /api/python/google/generativeai/update_tuned_model + - title: upload_file + path: /api/python/google/generativeai/upload_file + - title: protos + section: + - title: Overview + path: /api/python/google/generativeai/protos + - title: AttributionSourceId + path: /api/python/google/generativeai/protos/AttributionSourceId + - title: AttributionSourceId.GroundingPassageId + path: /api/python/google/generativeai/protos/AttributionSourceId/GroundingPassageId + - title: AttributionSourceId.SemanticRetrieverChunk + path: /api/python/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk + - title: BatchCreateChunksRequest + path: /api/python/google/generativeai/protos/BatchCreateChunksRequest + - title: BatchCreateChunksResponse + path: /api/python/google/generativeai/protos/BatchCreateChunksResponse + - title: BatchDeleteChunksRequest + path: /api/python/google/generativeai/protos/BatchDeleteChunksRequest + - title: BatchEmbedContentsRequest + path: 
/api/python/google/generativeai/protos/BatchEmbedContentsRequest + - title: BatchEmbedContentsResponse + path: /api/python/google/generativeai/protos/BatchEmbedContentsResponse + - title: BatchEmbedTextRequest + path: /api/python/google/generativeai/protos/BatchEmbedTextRequest + - title: BatchEmbedTextResponse + path: /api/python/google/generativeai/protos/BatchEmbedTextResponse + - title: BatchUpdateChunksRequest + path: /api/python/google/generativeai/protos/BatchUpdateChunksRequest + - title: BatchUpdateChunksResponse + path: /api/python/google/generativeai/protos/BatchUpdateChunksResponse + - title: Blob + path: /api/python/google/generativeai/protos/Blob + - title: CachedContent + path: /api/python/google/generativeai/protos/CachedContent + - title: CachedContent.UsageMetadata + path: /api/python/google/generativeai/protos/CachedContent/UsageMetadata + - title: Candidate + path: /api/python/google/generativeai/protos/Candidate + - title: Candidate.FinishReason + path: /api/python/google/generativeai/protos/Candidate/FinishReason + - title: Chunk + path: /api/python/google/generativeai/protos/Chunk + - title: Chunk.State + path: /api/python/google/generativeai/protos/Chunk/State + - title: ChunkData + path: /api/python/google/generativeai/protos/ChunkData + - title: CitationMetadata + path: /api/python/google/generativeai/protos/CitationMetadata + - title: CitationSource + path: /api/python/google/generativeai/protos/CitationSource + - title: CodeExecution + path: /api/python/google/generativeai/protos/CodeExecution + - title: CodeExecutionResult + path: /api/python/google/generativeai/protos/CodeExecutionResult + - title: CodeExecutionResult.Outcome + path: /api/python/google/generativeai/protos/CodeExecutionResult/Outcome + - title: Condition + path: /api/python/google/generativeai/protos/Condition + - title: Condition.Operator + path: /api/python/google/generativeai/protos/Condition/Operator + - title: Content + path: 
/api/python/google/generativeai/protos/Content + - title: ContentEmbedding + path: /api/python/google/generativeai/protos/ContentEmbedding + - title: ContentFilter + path: /api/python/google/generativeai/protos/ContentFilter + - title: Corpus + path: /api/python/google/generativeai/protos/Corpus + - title: CountMessageTokensRequest + path: /api/python/google/generativeai/protos/CountMessageTokensRequest + - title: CountMessageTokensResponse + path: /api/python/google/generativeai/protos/CountMessageTokensResponse + - title: CountTextTokensRequest + path: /api/python/google/generativeai/protos/CountTextTokensRequest + - title: CountTextTokensResponse + path: /api/python/google/generativeai/protos/CountTextTokensResponse + - title: CountTokensRequest + path: /api/python/google/generativeai/protos/CountTokensRequest + - title: CountTokensResponse + path: /api/python/google/generativeai/protos/CountTokensResponse + - title: CreateCachedContentRequest + path: /api/python/google/generativeai/protos/CreateCachedContentRequest + - title: CreateChunkRequest + path: /api/python/google/generativeai/protos/CreateChunkRequest + - title: CreateCorpusRequest + path: /api/python/google/generativeai/protos/CreateCorpusRequest + - title: CreateDocumentRequest + path: /api/python/google/generativeai/protos/CreateDocumentRequest + - title: CreateFileRequest + path: /api/python/google/generativeai/protos/CreateFileRequest + - title: CreateFileResponse + path: /api/python/google/generativeai/protos/CreateFileResponse + - title: CreatePermissionRequest + path: /api/python/google/generativeai/protos/CreatePermissionRequest + - title: CreateTunedModelMetadata + path: /api/python/google/generativeai/protos/CreateTunedModelMetadata + - title: CreateTunedModelRequest + path: /api/python/google/generativeai/protos/CreateTunedModelRequest + - title: CustomMetadata + path: /api/python/google/generativeai/protos/CustomMetadata + - title: Dataset + path: 
/api/python/google/generativeai/protos/Dataset + - title: DeleteCachedContentRequest + path: /api/python/google/generativeai/protos/DeleteCachedContentRequest + - title: DeleteChunkRequest + path: /api/python/google/generativeai/protos/DeleteChunkRequest + - title: DeleteCorpusRequest + path: /api/python/google/generativeai/protos/DeleteCorpusRequest + - title: DeleteDocumentRequest + path: /api/python/google/generativeai/protos/DeleteDocumentRequest + - title: DeleteFileRequest + path: /api/python/google/generativeai/protos/DeleteFileRequest + - title: DeletePermissionRequest + path: /api/python/google/generativeai/protos/DeletePermissionRequest + - title: DeleteTunedModelRequest + path: /api/python/google/generativeai/protos/DeleteTunedModelRequest + - title: Document + path: /api/python/google/generativeai/protos/Document + - title: EmbedContentRequest + path: /api/python/google/generativeai/protos/EmbedContentRequest + - title: EmbedContentResponse + path: /api/python/google/generativeai/protos/EmbedContentResponse + - title: EmbedTextRequest + path: /api/python/google/generativeai/protos/EmbedTextRequest + - title: EmbedTextResponse + path: /api/python/google/generativeai/protos/EmbedTextResponse + - title: Embedding + path: /api/python/google/generativeai/protos/Embedding + - title: Example + path: /api/python/google/generativeai/protos/Example + - title: ExecutableCode + path: /api/python/google/generativeai/protos/ExecutableCode + - title: ExecutableCode.Language + path: /api/python/google/generativeai/protos/ExecutableCode/Language + - title: File + path: /api/python/google/generativeai/protos/File + - title: File.State + path: /api/python/google/generativeai/protos/File/State + - title: FileData + path: /api/python/google/generativeai/protos/FileData + - title: FunctionCall + path: /api/python/google/generativeai/protos/FunctionCall + - title: FunctionCallingConfig + path: /api/python/google/generativeai/protos/FunctionCallingConfig + - title: 
FunctionCallingConfig.Mode + path: /api/python/google/generativeai/protos/FunctionCallingConfig/Mode + - title: FunctionDeclaration + path: /api/python/google/generativeai/protos/FunctionDeclaration + - title: FunctionResponse + path: /api/python/google/generativeai/protos/FunctionResponse + - title: GenerateAnswerRequest + path: /api/python/google/generativeai/protos/GenerateAnswerRequest + - title: GenerateAnswerRequest.AnswerStyle + path: /api/python/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle + - title: GenerateAnswerResponse + path: /api/python/google/generativeai/protos/GenerateAnswerResponse + - title: GenerateAnswerResponse.InputFeedback + path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback + - title: GenerateAnswerResponse.InputFeedback.BlockReason + path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason + - title: GenerateContentRequest + path: /api/python/google/generativeai/protos/GenerateContentRequest + - title: GenerateContentResponse + path: /api/python/google/generativeai/protos/GenerateContentResponse + - title: GenerateContentResponse.PromptFeedback + path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback + - title: GenerateContentResponse.PromptFeedback.BlockReason + path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason + - title: GenerateContentResponse.UsageMetadata + path: /api/python/google/generativeai/protos/GenerateContentResponse/UsageMetadata + - title: GenerateMessageRequest + path: /api/python/google/generativeai/protos/GenerateMessageRequest + - title: GenerateMessageResponse + path: /api/python/google/generativeai/protos/GenerateMessageResponse + - title: GenerateTextRequest + path: /api/python/google/generativeai/protos/GenerateTextRequest + - title: GenerateTextResponse + path: /api/python/google/generativeai/protos/GenerateTextResponse + - title: GenerationConfig + path: 
/api/python/google/generativeai/protos/GenerationConfig + - title: GetCachedContentRequest + path: /api/python/google/generativeai/protos/GetCachedContentRequest + - title: GetChunkRequest + path: /api/python/google/generativeai/protos/GetChunkRequest + - title: GetCorpusRequest + path: /api/python/google/generativeai/protos/GetCorpusRequest + - title: GetDocumentRequest + path: /api/python/google/generativeai/protos/GetDocumentRequest + - title: GetFileRequest + path: /api/python/google/generativeai/protos/GetFileRequest + - title: GetModelRequest + path: /api/python/google/generativeai/protos/GetModelRequest + - title: GetPermissionRequest + path: /api/python/google/generativeai/protos/GetPermissionRequest + - title: GetTunedModelRequest + path: /api/python/google/generativeai/protos/GetTunedModelRequest + - title: GroundingAttribution + path: /api/python/google/generativeai/protos/GroundingAttribution + - title: GroundingPassage + path: /api/python/google/generativeai/protos/GroundingPassage + - title: GroundingPassages + path: /api/python/google/generativeai/protos/GroundingPassages + - title: HarmCategory + path: /api/python/google/generativeai/protos/HarmCategory + - title: Hyperparameters + path: /api/python/google/generativeai/protos/Hyperparameters + - title: ListCachedContentsRequest + path: /api/python/google/generativeai/protos/ListCachedContentsRequest + - title: ListCachedContentsResponse + path: /api/python/google/generativeai/protos/ListCachedContentsResponse + - title: ListChunksRequest + path: /api/python/google/generativeai/protos/ListChunksRequest + - title: ListChunksResponse + path: /api/python/google/generativeai/protos/ListChunksResponse + - title: ListCorporaRequest + path: /api/python/google/generativeai/protos/ListCorporaRequest + - title: ListCorporaResponse + path: /api/python/google/generativeai/protos/ListCorporaResponse + - title: ListDocumentsRequest + path: /api/python/google/generativeai/protos/ListDocumentsRequest + - title: 
ListDocumentsResponse + path: /api/python/google/generativeai/protos/ListDocumentsResponse + - title: ListFilesRequest + path: /api/python/google/generativeai/protos/ListFilesRequest + - title: ListFilesResponse + path: /api/python/google/generativeai/protos/ListFilesResponse + - title: ListModelsRequest + path: /api/python/google/generativeai/protos/ListModelsRequest + - title: ListModelsResponse + path: /api/python/google/generativeai/protos/ListModelsResponse + - title: ListPermissionsRequest + path: /api/python/google/generativeai/protos/ListPermissionsRequest + - title: ListPermissionsResponse + path: /api/python/google/generativeai/protos/ListPermissionsResponse + - title: ListTunedModelsRequest + path: /api/python/google/generativeai/protos/ListTunedModelsRequest + - title: ListTunedModelsResponse + path: /api/python/google/generativeai/protos/ListTunedModelsResponse + - title: Message + path: /api/python/google/generativeai/protos/Message + - title: MessagePrompt + path: /api/python/google/generativeai/protos/MessagePrompt + - title: MetadataFilter + path: /api/python/google/generativeai/protos/MetadataFilter + - title: Model + path: /api/python/google/generativeai/protos/Model + - title: Part + path: /api/python/google/generativeai/protos/Part + - title: Permission + path: /api/python/google/generativeai/protos/Permission + - title: Permission.GranteeType + path: /api/python/google/generativeai/protos/Permission/GranteeType + - title: Permission.Role + path: /api/python/google/generativeai/protos/Permission/Role + - title: QueryCorpusRequest + path: /api/python/google/generativeai/protos/QueryCorpusRequest + - title: QueryCorpusResponse + path: /api/python/google/generativeai/protos/QueryCorpusResponse + - title: QueryDocumentRequest + path: /api/python/google/generativeai/protos/QueryDocumentRequest + - title: QueryDocumentResponse + path: /api/python/google/generativeai/protos/QueryDocumentResponse + - title: RelevantChunk + path: 
/api/python/google/generativeai/protos/RelevantChunk + - title: SafetyFeedback + path: /api/python/google/generativeai/protos/SafetyFeedback + - title: SafetyRating + path: /api/python/google/generativeai/protos/SafetyRating + - title: SafetySetting + path: /api/python/google/generativeai/protos/SafetySetting + - title: Schema + path: /api/python/google/generativeai/protos/Schema + - title: Schema.PropertiesEntry + path: /api/python/google/generativeai/protos/Schema/PropertiesEntry + - title: SemanticRetrieverConfig + path: /api/python/google/generativeai/protos/SemanticRetrieverConfig + - title: StringList + path: /api/python/google/generativeai/protos/StringList + - title: TaskType + path: /api/python/google/generativeai/protos/TaskType + - title: TextCompletion + path: /api/python/google/generativeai/protos/TextCompletion + - title: TextPrompt + path: /api/python/google/generativeai/protos/TextPrompt + - title: Tool + path: /api/python/google/generativeai/protos/Tool + - title: ToolConfig + path: /api/python/google/generativeai/protos/ToolConfig + - title: TransferOwnershipRequest + path: /api/python/google/generativeai/protos/TransferOwnershipRequest + - title: TransferOwnershipResponse + path: /api/python/google/generativeai/protos/TransferOwnershipResponse + - title: TunedModel + path: /api/python/google/generativeai/protos/TunedModel + - title: TunedModelSource + path: /api/python/google/generativeai/protos/TunedModelSource + - title: TuningExample + path: /api/python/google/generativeai/protos/TuningExample + - title: TuningExamples + path: /api/python/google/generativeai/protos/TuningExamples + - title: TuningSnapshot + path: /api/python/google/generativeai/protos/TuningSnapshot + - title: TuningTask + path: /api/python/google/generativeai/protos/TuningTask + - title: Type + path: /api/python/google/generativeai/protos/Type + - title: UpdateCachedContentRequest + path: /api/python/google/generativeai/protos/UpdateCachedContentRequest + - title: 
UpdateChunkRequest + path: /api/python/google/generativeai/protos/UpdateChunkRequest + - title: UpdateCorpusRequest + path: /api/python/google/generativeai/protos/UpdateCorpusRequest + - title: UpdateDocumentRequest + path: /api/python/google/generativeai/protos/UpdateDocumentRequest + - title: UpdatePermissionRequest + path: /api/python/google/generativeai/protos/UpdatePermissionRequest + - title: UpdateTunedModelRequest + path: /api/python/google/generativeai/protos/UpdateTunedModelRequest + - title: VideoMetadata + path: /api/python/google/generativeai/protos/VideoMetadata + - title: types + section: + - title: Overview + path: /api/python/google/generativeai/types + - title: AnyModelNameOptions + path: /api/python/google/generativeai/types/AnyModelNameOptions + - title: AsyncGenerateContentResponse + path: /api/python/google/generativeai/types/AsyncGenerateContentResponse + - title: AuthorError + path: /api/python/google/generativeai/types/AuthorError + - title: BaseModelNameOptions + path: /api/python/google/generativeai/types/BaseModelNameOptions + - title: BlobDict + path: /api/python/google/generativeai/types/BlobDict + - title: BlobType + path: /api/python/google/generativeai/types/BlobType + - title: BlockedPromptException + path: /api/python/google/generativeai/types/BlockedPromptException + - title: BlockedReason + path: /api/python/google/generativeai/types/BlockedReason + - title: BrokenResponseError + path: /api/python/google/generativeai/types/BrokenResponseError + - title: CallableFunctionDeclaration + path: /api/python/google/generativeai/types/CallableFunctionDeclaration + - title: ChatResponse + path: /api/python/google/generativeai/types/ChatResponse + - title: CitationMetadataDict + path: /api/python/google/generativeai/types/CitationMetadataDict + - title: CitationSourceDict + path: /api/python/google/generativeai/types/CitationSourceDict + - title: Completion + path: /api/python/google/generativeai/types/Completion + - title: ContentDict + 
path: /api/python/google/generativeai/types/ContentDict + - title: ContentFilterDict + path: /api/python/google/generativeai/types/ContentFilterDict + - title: ContentType + path: /api/python/google/generativeai/types/ContentType + - title: ContentsType + path: /api/python/google/generativeai/types/ContentsType + - title: ExampleDict + path: /api/python/google/generativeai/types/ExampleDict + - title: ExampleOptions + path: /api/python/google/generativeai/types/ExampleOptions + - title: ExamplesOptions + path: /api/python/google/generativeai/types/ExamplesOptions + - title: File + path: /api/python/google/generativeai/types/File + - title: FileDataDict + path: /api/python/google/generativeai/types/FileDataDict + - title: FileDataType + path: /api/python/google/generativeai/types/FileDataType + - title: FunctionDeclaration + path: /api/python/google/generativeai/types/FunctionDeclaration + - title: FunctionDeclarationType + path: /api/python/google/generativeai/types/FunctionDeclarationType + - title: FunctionLibrary + path: /api/python/google/generativeai/types/FunctionLibrary + - title: FunctionLibraryType + path: /api/python/google/generativeai/types/FunctionLibraryType + - title: GenerateContentResponse + path: /api/python/google/generativeai/types/GenerateContentResponse + - title: GenerationConfig + path: /api/python/google/generativeai/types/GenerationConfig + - title: GenerationConfigDict + path: /api/python/google/generativeai/types/GenerationConfigDict + - title: GenerationConfigType + path: /api/python/google/generativeai/types/GenerationConfigType + - title: HarmBlockThreshold + path: /api/python/google/generativeai/types/HarmBlockThreshold + - title: HarmCategory + path: /api/python/google/generativeai/types/HarmCategory + - title: HarmProbability + path: /api/python/google/generativeai/types/HarmProbability + - title: IncompleteIterationError + path: /api/python/google/generativeai/types/IncompleteIterationError + - title: MessageDict + path: 
/api/python/google/generativeai/types/MessageDict + - title: MessageOptions + path: /api/python/google/generativeai/types/MessageOptions + - title: MessagePromptDict + path: /api/python/google/generativeai/types/MessagePromptDict + - title: MessagePromptOptions + path: /api/python/google/generativeai/types/MessagePromptOptions + - title: MessagesOptions + path: /api/python/google/generativeai/types/MessagesOptions + - title: Model + path: /api/python/google/generativeai/types/Model + - title: ModelsIterable + path: /api/python/google/generativeai/types/ModelsIterable + - title: PartDict + path: /api/python/google/generativeai/types/PartDict + - title: PartType + path: /api/python/google/generativeai/types/PartType + - title: Permission + path: /api/python/google/generativeai/types/Permission + - title: Permissions + path: /api/python/google/generativeai/types/Permissions + - title: RequestOptions + path: /api/python/google/generativeai/types/RequestOptions + - title: RequestOptionsType + path: /api/python/google/generativeai/types/RequestOptionsType + - title: ResponseDict + path: /api/python/google/generativeai/types/ResponseDict + - title: SafetyFeedbackDict + path: /api/python/google/generativeai/types/SafetyFeedbackDict + - title: SafetyRatingDict + path: /api/python/google/generativeai/types/SafetyRatingDict + - title: SafetySettingDict + path: /api/python/google/generativeai/types/SafetySettingDict + - title: Status + path: /api/python/google/generativeai/types/Status + - title: StopCandidateException + path: /api/python/google/generativeai/types/StopCandidateException + - title: StrictContentType + path: /api/python/google/generativeai/types/StrictContentType + - title: Tool + path: /api/python/google/generativeai/types/Tool + - title: ToolDict + path: /api/python/google/generativeai/types/ToolDict + - title: ToolsType + path: /api/python/google/generativeai/types/ToolsType + - title: TunedModel + path: /api/python/google/generativeai/types/TunedModel + - 
title: TunedModelNameOptions + path: /api/python/google/generativeai/types/TunedModelNameOptions + - title: TunedModelState + path: /api/python/google/generativeai/types/TunedModelState + - title: TypedDict + path: /api/python/google/generativeai/types/TypedDict + - title: get_default_file_client + path: /api/python/google/generativeai/types/get_default_file_client + - title: to_file_data + path: /api/python/google/generativeai/types/to_file_data diff --git a/docs/api/google/generativeai/all_symbols.md b/docs/api/google/generativeai/all_symbols.md new file mode 100644 index 000000000..a6fa84caf --- /dev/null +++ b/docs/api/google/generativeai/all_symbols.md @@ -0,0 +1,261 @@ +# All symbols in Generative AI - Python + + + +## Primary symbols +* google.generativeai +* google.generativeai.ChatSession +* google.generativeai.GenerationConfig +* google.generativeai.GenerativeModel +* google.generativeai.chat +* google.generativeai.chat_async +* google.generativeai.configure +* google.generativeai.count_message_tokens +* google.generativeai.count_text_tokens +* google.generativeai.create_tuned_model +* google.generativeai.delete_file +* google.generativeai.delete_tuned_model +* google.generativeai.embed_content +* google.generativeai.embed_content_async +* google.generativeai.generate_embeddings +* google.generativeai.generate_text +* google.generativeai.get_base_model +* google.generativeai.get_file +* google.generativeai.get_model +* google.generativeai.get_operation +* google.generativeai.get_tuned_model +* google.generativeai.list_files +* google.generativeai.list_models +* google.generativeai.list_operations +* google.generativeai.list_tuned_models +* google.generativeai.protos +* google.generativeai.protos.AttributionSourceId +* google.generativeai.protos.AttributionSourceId.GroundingPassageId +* google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk +* google.generativeai.protos.BatchCreateChunksRequest +* 
google.generativeai.protos.BatchCreateChunksResponse +* google.generativeai.protos.BatchDeleteChunksRequest +* google.generativeai.protos.BatchEmbedContentsRequest +* google.generativeai.protos.BatchEmbedContentsResponse +* google.generativeai.protos.BatchEmbedTextRequest +* google.generativeai.protos.BatchEmbedTextResponse +* google.generativeai.protos.BatchUpdateChunksRequest +* google.generativeai.protos.BatchUpdateChunksResponse +* google.generativeai.protos.Blob +* google.generativeai.protos.CachedContent +* google.generativeai.protos.CachedContent.UsageMetadata +* google.generativeai.protos.Candidate +* google.generativeai.protos.Candidate.FinishReason +* google.generativeai.protos.Chunk +* google.generativeai.protos.Chunk.State +* google.generativeai.protos.ChunkData +* google.generativeai.protos.CitationMetadata +* google.generativeai.protos.CitationSource +* google.generativeai.protos.CodeExecution +* google.generativeai.protos.CodeExecutionResult +* google.generativeai.protos.CodeExecutionResult.Outcome +* google.generativeai.protos.Condition +* google.generativeai.protos.Condition.Operator +* google.generativeai.protos.Content +* google.generativeai.protos.ContentEmbedding +* google.generativeai.protos.ContentFilter +* google.generativeai.protos.ContentFilter.BlockedReason +* google.generativeai.protos.Corpus +* google.generativeai.protos.CountMessageTokensRequest +* google.generativeai.protos.CountMessageTokensResponse +* google.generativeai.protos.CountTextTokensRequest +* google.generativeai.protos.CountTextTokensResponse +* google.generativeai.protos.CountTokensRequest +* google.generativeai.protos.CountTokensResponse +* google.generativeai.protos.CreateCachedContentRequest +* google.generativeai.protos.CreateChunkRequest +* google.generativeai.protos.CreateCorpusRequest +* google.generativeai.protos.CreateDocumentRequest +* google.generativeai.protos.CreateFileRequest +* google.generativeai.protos.CreateFileResponse +* 
google.generativeai.protos.CreatePermissionRequest +* google.generativeai.protos.CreateTunedModelMetadata +* google.generativeai.protos.CreateTunedModelRequest +* google.generativeai.protos.CustomMetadata +* google.generativeai.protos.Dataset +* google.generativeai.protos.DeleteCachedContentRequest +* google.generativeai.protos.DeleteChunkRequest +* google.generativeai.protos.DeleteCorpusRequest +* google.generativeai.protos.DeleteDocumentRequest +* google.generativeai.protos.DeleteFileRequest +* google.generativeai.protos.DeletePermissionRequest +* google.generativeai.protos.DeleteTunedModelRequest +* google.generativeai.protos.Document +* google.generativeai.protos.EmbedContentRequest +* google.generativeai.protos.EmbedContentResponse +* google.generativeai.protos.EmbedTextRequest +* google.generativeai.protos.EmbedTextResponse +* google.generativeai.protos.Embedding +* google.generativeai.protos.Example +* google.generativeai.protos.ExecutableCode +* google.generativeai.protos.ExecutableCode.Language +* google.generativeai.protos.File +* google.generativeai.protos.File.State +* google.generativeai.protos.FileData +* google.generativeai.protos.FunctionCall +* google.generativeai.protos.FunctionCallingConfig +* google.generativeai.protos.FunctionCallingConfig.Mode +* google.generativeai.protos.FunctionDeclaration +* google.generativeai.protos.FunctionResponse +* google.generativeai.protos.GenerateAnswerRequest +* google.generativeai.protos.GenerateAnswerRequest.AnswerStyle +* google.generativeai.protos.GenerateAnswerResponse +* google.generativeai.protos.GenerateAnswerResponse.InputFeedback +* google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason +* google.generativeai.protos.GenerateContentRequest +* google.generativeai.protos.GenerateContentResponse +* google.generativeai.protos.GenerateContentResponse.PromptFeedback +* google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason +* 
google.generativeai.protos.GenerateContentResponse.UsageMetadata +* google.generativeai.protos.GenerateMessageRequest +* google.generativeai.protos.GenerateMessageResponse +* google.generativeai.protos.GenerateTextRequest +* google.generativeai.protos.GenerateTextResponse +* google.generativeai.protos.GenerationConfig +* google.generativeai.protos.GetCachedContentRequest +* google.generativeai.protos.GetChunkRequest +* google.generativeai.protos.GetCorpusRequest +* google.generativeai.protos.GetDocumentRequest +* google.generativeai.protos.GetFileRequest +* google.generativeai.protos.GetModelRequest +* google.generativeai.protos.GetPermissionRequest +* google.generativeai.protos.GetTunedModelRequest +* google.generativeai.protos.GroundingAttribution +* google.generativeai.protos.GroundingPassage +* google.generativeai.protos.GroundingPassages +* google.generativeai.protos.HarmCategory +* google.generativeai.protos.Hyperparameters +* google.generativeai.protos.ListCachedContentsRequest +* google.generativeai.protos.ListCachedContentsResponse +* google.generativeai.protos.ListChunksRequest +* google.generativeai.protos.ListChunksResponse +* google.generativeai.protos.ListCorporaRequest +* google.generativeai.protos.ListCorporaResponse +* google.generativeai.protos.ListDocumentsRequest +* google.generativeai.protos.ListDocumentsResponse +* google.generativeai.protos.ListFilesRequest +* google.generativeai.protos.ListFilesResponse +* google.generativeai.protos.ListModelsRequest +* google.generativeai.protos.ListModelsResponse +* google.generativeai.protos.ListPermissionsRequest +* google.generativeai.protos.ListPermissionsResponse +* google.generativeai.protos.ListTunedModelsRequest +* google.generativeai.protos.ListTunedModelsResponse +* google.generativeai.protos.Message +* google.generativeai.protos.MessagePrompt +* google.generativeai.protos.MetadataFilter +* google.generativeai.protos.Model +* google.generativeai.protos.Part +* 
google.generativeai.protos.Permission +* google.generativeai.protos.Permission.GranteeType +* google.generativeai.protos.Permission.Role +* google.generativeai.protos.QueryCorpusRequest +* google.generativeai.protos.QueryCorpusResponse +* google.generativeai.protos.QueryDocumentRequest +* google.generativeai.protos.QueryDocumentResponse +* google.generativeai.protos.RelevantChunk +* google.generativeai.protos.SafetyFeedback +* google.generativeai.protos.SafetyRating +* google.generativeai.protos.SafetyRating.HarmProbability +* google.generativeai.protos.SafetySetting +* google.generativeai.protos.SafetySetting.HarmBlockThreshold +* google.generativeai.protos.Schema +* google.generativeai.protos.Schema.PropertiesEntry +* google.generativeai.protos.SemanticRetrieverConfig +* google.generativeai.protos.StringList +* google.generativeai.protos.TaskType +* google.generativeai.protos.TextCompletion +* google.generativeai.protos.TextPrompt +* google.generativeai.protos.Tool +* google.generativeai.protos.ToolConfig +* google.generativeai.protos.TransferOwnershipRequest +* google.generativeai.protos.TransferOwnershipResponse +* google.generativeai.protos.TunedModel +* google.generativeai.protos.TunedModel.State +* google.generativeai.protos.TunedModelSource +* google.generativeai.protos.TuningExample +* google.generativeai.protos.TuningExamples +* google.generativeai.protos.TuningSnapshot +* google.generativeai.protos.TuningTask +* google.generativeai.protos.Type +* google.generativeai.protos.UpdateCachedContentRequest +* google.generativeai.protos.UpdateChunkRequest +* google.generativeai.protos.UpdateCorpusRequest +* google.generativeai.protos.UpdateDocumentRequest +* google.generativeai.protos.UpdatePermissionRequest +* google.generativeai.protos.UpdateTunedModelRequest +* google.generativeai.protos.VideoMetadata +* google.generativeai.types +* google.generativeai.types.AnyModelNameOptions +* google.generativeai.types.AsyncGenerateContentResponse +* 
google.generativeai.types.AuthorError +* google.generativeai.types.BaseModelNameOptions +* google.generativeai.types.BlobDict +* google.generativeai.types.BlobType +* google.generativeai.types.BlockedPromptException +* google.generativeai.types.BlockedReason +* google.generativeai.types.BrokenResponseError +* google.generativeai.types.CallableFunctionDeclaration +* google.generativeai.types.ChatResponse +* google.generativeai.types.CitationMetadataDict +* google.generativeai.types.CitationSourceDict +* google.generativeai.types.Completion +* google.generativeai.types.ContentDict +* google.generativeai.types.ContentFilterDict +* google.generativeai.types.ContentType +* google.generativeai.types.ContentsType +* google.generativeai.types.ExampleDict +* google.generativeai.types.ExampleOptions +* google.generativeai.types.ExamplesOptions +* google.generativeai.types.File +* google.generativeai.types.FileDataDict +* google.generativeai.types.FileDataType +* google.generativeai.types.FunctionDeclaration +* google.generativeai.types.FunctionDeclarationType +* google.generativeai.types.FunctionLibrary +* google.generativeai.types.FunctionLibraryType +* google.generativeai.types.GenerateContentResponse +* google.generativeai.types.GenerationConfig +* google.generativeai.types.GenerationConfigDict +* google.generativeai.types.GenerationConfigType +* google.generativeai.types.HarmBlockThreshold +* google.generativeai.types.HarmCategory +* google.generativeai.types.HarmProbability +* google.generativeai.types.IncompleteIterationError +* google.generativeai.types.MessageDict +* google.generativeai.types.MessageOptions +* google.generativeai.types.MessagePromptDict +* google.generativeai.types.MessagePromptOptions +* google.generativeai.types.MessagesOptions +* google.generativeai.types.Model +* google.generativeai.types.ModelNameOptions +* google.generativeai.types.ModelsIterable +* google.generativeai.types.PartDict +* google.generativeai.types.PartType +* 
google.generativeai.types.Permission +* google.generativeai.types.Permissions +* google.generativeai.types.RequestOptions +* google.generativeai.types.RequestOptionsType +* google.generativeai.types.ResponseDict +* google.generativeai.types.SafetyFeedbackDict +* google.generativeai.types.SafetyRatingDict +* google.generativeai.types.SafetySettingDict +* google.generativeai.types.Status +* google.generativeai.types.StopCandidateException +* google.generativeai.types.StrictContentType +* google.generativeai.types.Tool +* google.generativeai.types.ToolDict +* google.generativeai.types.ToolsType +* google.generativeai.types.TunedModel +* google.generativeai.types.TunedModelNameOptions +* google.generativeai.types.TunedModelState +* google.generativeai.types.TypedDict +* google.generativeai.types.get_default_file_client +* google.generativeai.types.to_file_data +* google.generativeai.update_tuned_model +* google.generativeai.upload_file \ No newline at end of file diff --git a/docs/api/google/generativeai/api_report.pb b/docs/api/google/generativeai/api_report.pb new file mode 100644 index 0000000000000000000000000000000000000000..96c5f00568db0f4db860421c667f7d315faa1484 GIT binary patch literal 49595 zcmc&-&66BQm9JK?8;&9fPw)`|T0Um^G#_rpV|KL|mMN(v*$(XRpcw-}5ZkKhu9<16 zyQ@`QEoo$1G~QkCvak!mg|EW|7YNQAIL?XS$dLnw;li0efy|d#)mc@UFF!gpejb zMIWF)eT@7aJx%}d;gI~RbNbtlUilRLdAURWvmkhm{tW+15L|he1mWT|{J7-TB5*fL z?v19yjn2h?fIkzkHZFq%fW`m(?F&yB;5`gKqr;mg(_w!?w2qGwg8F2k!a3D-$#mVN z_##q8_i|4%2p zWFo*}mQMPEcyOEy`-60vC(~Tqei7Y%d<6pO_V4Id;X9r1b0)HI7D!}*fFE$9fFDlT zR)}PNu-uXR!JD1%ITt}HD|2q>5Go;-ov>>n#5|9)ygwjhC@u(a-*RWgeK#+Q+(-C8 zSP+#6x5D?zBmk@nEAH94OcsCZGGGDs1VPRJ1uPZU&cY9>u((-$Bc|a8q=@PCaCEfD zk_`dyOxWK`N^xK0^hzU65Y*^Z5l&{|Q*uDcDxA{`cDVonh3_G`Q4IphB>b2fF;DK7 zWX9l@Y|A>s-IvG9L6*dM($5!DLLL*Kaly1Rx_frn?)f97j^7HVUGA9e)PX!jAQ2-W z$cLjbT}>LIxl7z^a+IE*Zpqc-AXW7!R>aJ?;es^}|5>(j$>cyFV8Mza?`<{#t5ysY zF*prBp#?3Sk!r~I=av*u+@A`-6FFc>@SyH+G(DP&&bSnEok$98z&8p5m(bKZL{O3B z*V#dd9BQpyFC8*iAvFRJVrD 
zi)u0Va8PR`cM%Gh&Y(;ng#q*WfM;p0x-I0%DTS)KeVLDo8aglBfjlOTNAnywje3PZ zYLacKC$&3r<^&8_w~FhDY6aAq8_2?`Lj7&IH370*xteZJzcum-gKc?F9s&VOSUdk( zzAjJSW?7o2^X}z5&qfD}oD{En=^`5>FAhm<^CV;wP)a~q11S~Q`Q1WLG$~#GzY|`T zIlUUb`+P>q8%nmjF_f2Ti6o1m-)KMXgx4%3+Dj(!G#?FaB*a+B-6ZQBFQ&IucDmX0 zZoO3B?u5@GQeBDj!Eq1r7r>t1NbW3>d9IRy)ufg5S1$u8H>7nBUn_|W^I1BbCk-KZ za>Mw>QV0`6T!nP%@fzY9&47BW0@baCB$nw{<3Bs$zvy;EO$dBBPMa+ey%Z1S9SLzu zfDrp6O;PPho_{)Rhi`??7#V@#w_pI1{rKu^ND7O`yC17E;D&HoiHSTw7dv5`9yEA> zr#lhdc##mGO#;VVd6CNDde8yl?$^MLSQQ6EeAO~w{Z}WHcEV)9-B)S*^W`Lu$@<3) zF*pqdF7Y4>KZ@-8UQ7Zc-E$30UI{^Dj3Tl9q4#y%*GkOp_0e=RKfaN~bLeapv?rhPj%W}-)&iUFZEO=5H{8D!e!*UjgAr+e z(avmD03iQr*Gs2rd~lK{ zMQ2=$K7Y0(gNO3|Ysm%N3#N%R=Kul*UcYXTz0!?e`sNQ_qTv?xvY_65``03qNT9%{mK>T!p6! z5e2r*RmOOFNSgcC?k5B2YY<%8oZ|VwlA`Te@*>_t>4|7JlKEn+tjkX+Ve({vAIaHqD7&tr?N7NZT`CPs-IrSVDR|Bd&zBwM3c8;B>~ zn0*a%_@w$Z%;A;$N%LgKad$krV33mZkur0G7_dZ7_o-oh+qmb_Sqi5~rn%}K z{d^`rPAv);KBlBNR@tNp$)79}5>3j-5TP+k4^j#r4+s)WB=XLbQ?;gP%rWTeqj8>; z+*0BOLUZUXOO;dkF>7h$Y_^y;6=i@d-?0SL9EbF1tLJ4l0l5#Oc&h$4*BThrHuKb+ z0?35Gd%sH{dP`GiI)l^_v;p6q8AvZkIucs0eocZFY@iFQR04Dx@Tl&z4Ri&F0IpD- zZ$hW#*KI;}E-AL;Rj$B^he=_1GN@UA)#0cLoSWd3r8;)Kam@lS@RV|p3Z1L&gpQxW zv2)?8>0mJ-aXFlB8T#X+TZZ2j z-D+H&f$JG>oy9j2LC-eph;TNgtKNadbHCwdI^Q z(8H1{Iv!`85*)mq)lL|3XJ_Z!mT4uP+X&DGYTH^JKP>`m@AW$#&#v8%Co^(OLD8&$ zoo#!njE8P^P7@!D6WT0S0soMuEx)sLDGiZSk-IO&)1w8w&d7CV$?9sP1+oX*K=$2E z*h3nE3K809ixOjCBPA+Hd>)pNRQBH;jN!FK>SzX^jZ#N5cx{*B6Hcnl;IU_lj}y}D zrtrg{=vwlzzBB5WwoE&hkKglvPT^V@Y{7fdE6_IF9iM&>uzfl>K3`G;^>*rEtg25Z zHdfWE+h*ux4#B`4Jr&P|DW-Iyh38}u3!`@3wuSsYUg52a>42V__Tn+Uh%ddY@@twe z*pdWZ_gxoY$)QRQ?&qrTfuwAc1x?{E6`)JIK7|zZlm&Xtwo-hr6JAH6v|5TbXpEQy zJIF_b@yTnN=?aY$-iXfK7(SWL-56e}@J)Qy09g}{+=v(Lg$!4d!8nH5bmgqaP0CmA zwHyz|tL6foOr*)LX>o=Qt`KP3b4i>k`D-c3<>~xhlBxO?=fY{QZC93%6jseh1?Vzy z_VSZea#?50rAxT_Q73#EVN9(sZ4jr1#q~1z>q#;^hzH6$!as3IPk-78zidG> z%pz5Jx>v^O;PwswSGP_(;Ws_X)&?UI^S$a*M4trtvrhO`-?Dk*OW%g`NuRzA=ao%= z(h2u{uIeKvS5o8B7uHk>T)^(x!l8gIxspCCHqlj(YbgQF@8>ja`<`X`ta+P4AwqYT 
zJV7_pGW-^0c-zwZ=Vpmy%_udATI2X%cEbNK*v{tCwZZblgkSS3q)(pzMJN2af9bq2 zy?^5kJYER)Z#=JTZy1U!+`}W^{~vx`O2u`W2M&a>DqE^E%kKGnwJHFe2_1x%0#GRd z9)M110J3eO;N7EQO|Yj`HdqASbSWqBk&h}mt~?|Fwi^I%UU7KZw zKXRvn27a@pR(zzb{=S~%))afM`P5~%<43nI#Mz`r-VZrSvy(z)hO(No0$BS{iru>u zVcRG=@E0yB=sv57yJ!jBfb~`;Sg7kW|F=-rYhsU&dQ{BpF=NNW#tSFp{`QOFy6}i*7D6V3B1v6G3YPUp z(gOVW!HG%nY)Qn%72kwTl;!BG0XRt$(=1lyE$dhvuZuwQ0vuNz0{6M)GkAE`MA3^h zwUuSZPu6SjxcW{jkLN8EpC-ym)%m(dE z*cRKGM{DI0v{8iOS2`4O_$)^Wkz+G?hy>m$QzskvtWn);;I&-EAFWla!XqnO1^gSS zdJgm4C6h2-eTv+M3*JXRpiooY>$_to=*I$N$liR;PHCUcW~a1QC%cD!uA^>mkKTnB z=3iMP*@@-=N9YXM#yY$(uYfBuRDOKqRH@*FIiMGnxvDF{GmTRhcEJnt8hi`t^6AfH+DAOlaA_u|7OKhe#*Li%c(r{hNn4EP3l+gS3z9WP5DiaceR zQQ~%HuzN`^#SNHqfx0ux>HvS>ZjXHObkh9B1RueJeErN4kPLfcA6H?BbhvDmWcetW zUz_G|YEypVn15{+_80khB3B-aZp$nrZ^Fg_n_pm(PX(MZ`!8uCZ(A+l<;x!V^wac+ zHX|C&#W`)$rFy_GSMJC2+gQQk1c}G%0A! zEW;j7e-y4K=UlF1&t`s@WUt(tl0(kp(M*-s@zCRxR%-aLDMFds37-{+ zTrx_H$;&tW8WBf)3B&g0B!0fL>M|=n+Q2RC6nXH>ia?VHt9gLvl-k+WeKoux;z~}^ z*)YtO%l0+_-kZj=`Ee>qD}-FqHl5P$EMjTO(2bqN6DVLwTTl&QA&plj?!d<~Xxh|9 z4l_Z&cmk+C^Oq?)CtAXJ%UulA^O?68sOL3j!9N=V=Jc4aXl7XWCmT&3^5m?$bcr%} z$4K~Yq$aom_1ba*-ZA3f=)f^o-3c9U*)Vin%V-^ZO;yVTSTzgVSO6mtR#CqV5%948 z)zL6Xi$m8&JmSfv)9RClN~E9i%t>e&55-_r)V}4jt#@Q!VV9ibYKkG!84bFFaxJCGzig!mF~o2d465#d{)t z#CcQkj*EdfM3QuTxBjikrd{-(*dm9x(ht`d2tne-a5SiW^b3$09#YvujxfpY0K7o3qB@BwZ6L4^qs#W=T+6(lln5E@Bogz;waR#@O zAuFG+-5(?~Uc3w1IeDF;d0auo-$8IHaTFiHk-)Z*oGJ#hW#~SWRK;Mn3f5b)YN4*r zq}4)QugR;4Be5;r^_avex|w`&Gy2Js;)N**GdcOLDsN_(^w~f9TyXKeRr7VNB@NRN zjoT5cp=ZF4mi;&KGLQ7>Uab@?Y#;LH0*fypGfBW6->kIe%%b*NiVM7k(#*0nFTRYu zZUTV^OJsTIwiE^7pXB7nNa7QbW;RiLu9%zTI3^EQRPwpidDWIMRrhOQHI_U25UuEY zH&MG;G8>;%vo@zT(p~whpAtlAGU9AsMe)XQYu}aS*+TU-PQKNgLaAaJyo%1clyYq5 zaZbG@#l!3zTYbo|6nb{Hw~}-Dc}g9VI#Dy>bY5dL;pAnJr$kWEc`g)$3XDzNO4pMY z;?U#2Y02fNmb~TM)Y2(sZaoRK_zxo2Q zN1N!aWHh8RCB3t&v;mVsT=8`?nTJ?|NRnUrPGA(>_NoP58XaUY4BpV1n5)b(TE(YM zmOEG8C0XB#59Bj8QaInLD(3VVwq+X*lBhpgrfuX)9MYGhQ3=?V^W1=2LlV!uw>QGi 
zOZG_@H;KQl8#(=Vz?ViD2-PkPH^nvUiU}I8(6DwJoqJ=U+cC%NYK>nadkQ;fPEBvJ z!~3B7$7GF<)A6uq$J408J{-*Q+rXvYsQT;)Gj$WZjRu+GiyTT3e0IqcALmA~y|v10 z75CXMvsK({(@b$kKxuWtE(a%hGUsJGZy>>j|Emyj0)vAVv^^m{8=-PDr}{%{?c)8F zHv9{1QHm;iz5Igqfg7mae9Bx|pWbAytXDr$d}L?kN9DHi=s}A4sTJsMmL9|hqwy$L zJ|_hI0^8Ae#T_~+a2tN4xLzd3_Zg%V!;orVZ{B33vQLMyQrW9dp$AEnw}kCY>oH$} z3B`SJI$-_H^@_pv$V*UG&{{u-@w>F&~`B!lE(>E#)eFyIsjQUwyN)TBEM(HxS;{>WN#I;Vi3Fz zU5vfMp~P~BkdwN#s(0Xr*h$t(-+6bCB{h35VRI|H3@dv_)Dr@SK4%lZVsL(fCn06C zt@fIHKD=S@Zq2KgJ0856v{*iC;$Cx#UaDO(ix(MQmrtxW4KSv6DeJt(`9*GigZVkM zCKH#QqL&tcVKcVrB*^*=PrfLH9A${GIloTSPm3!}Didrrdu-JmsB(FHJOdZT&?07n zkOLkh!0}1rS76|e^3Up8ahXLxs4GGa{q6f%3xK6u`)u>9q9a7i|yUi8QBPwoU zvh80(BVc=+^8e{aB=!jMg^LBFL0}^cq)FAK1<3-s}<9bSVw_Z>5fVP}$MJW+5-`q49x0_z`uP80Y;%GCJ2Ej7Q|_cN+|P-C<>Pe2&-? z5J|X+Ii9)zud1rzegz(DarATgxIlkGz8ya<@R(`eYQ~u=UZgPXo$y1lbzIEGX*>k& W2vAvSp3*x1ltD^D8-dgpdw&OMgDV06 literal 0 HcmV?d00001 diff --git a/docs/api/google/generativeai/chat.md b/docs/api/google/generativeai/chat.md new file mode 100644 index 000000000..1dc51c8f6 --- /dev/null +++ b/docs/api/google/generativeai/chat.md @@ -0,0 +1,198 @@ +description: Calls the API to initiate a chat with a model using provided parameters + +
+ + +
+ +# google.generativeai.chat + + + + + + + + + +Calls the API to initiate a chat with a model using provided parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +Which model to call, as a string or a types.Model. +
+`context` + +Text that should be provided to the model first, to ground the response. + +If not empty, this `context` will be given to the model first before the +`examples` and `messages`. + +This field can be a description of your prompt to the model to help provide +context and guide the responses. + +Examples: + +* "Translate the phrase from English to French." +* "Given a statement, classify the sentiment as happy, sad or neutral." + +Anything included in this field will take precedence over history in `messages` +if the total input size exceeds the model's Model.input_token_limit. +
+`examples` + +Examples of what the model should generate. + +This includes both the user input and the response that the model should +emulate. + +These `examples` are treated identically to conversation messages except +that they take precedence over the history in `messages`: +If the total input size exceeds the model's `input_token_limit` the input +will be truncated. Items will be dropped from `messages` before `examples` +
+`messages` + +A snapshot of the conversation history sorted chronologically. + +Turns alternate between two authors. + +If the total input size exceeds the model's `input_token_limit` the input +will be truncated: The oldest items will be dropped from `messages`. +
+`temperature` + +Controls the randomness of the output. Must be positive. + +Typical values are in the range: `[0.0,1.0]`. Higher values produce a +more random and varied response. A temperature of zero will be deterministic. +
+`candidate_count` + +The **maximum** number of generated response messages to return. + +This value must be between `[1, 8]`, inclusive. If unset, this +will default to `1`. + +Note: Only unique candidates are returned. Higher temperatures are more +likely to produce unique candidates. Setting `temperature=0.0` will always +return 1 candidate regardless of the `candidate_count`. +
+`top_k` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and +top-k sampling. + +`top_k` sets the maximum number of tokens to sample from on each step. +
+`top_p` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and +top-k sampling. + +`top_p` configures the nucleus sampling. It sets the maximum cumulative + probability of tokens to sample from. + + For example, if the sorted probabilities are + `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample + as `[0.625, 0.25, 0.125, 0, 0, 0]`. + + Typical values are in the `[0.9, 1.0]` range. +
+`prompt` + +You may pass a types.MessagePromptOptions **instead** of a +setting `context`/`examples`/`messages`, but not both. +
+`client` + +If you're not relying on the default client, you pass a +`glm.DiscussServiceClient` instead. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.ChatResponse containing the model's reply. +
+ diff --git a/docs/api/google/generativeai/chat_async.md b/docs/api/google/generativeai/chat_async.md new file mode 100644 index 000000000..614456c1f --- /dev/null +++ b/docs/api/google/generativeai/chat_async.md @@ -0,0 +1,198 @@ +description: Calls the API to initiate a chat with a model using provided parameters + +
+ + +
+ +# google.generativeai.chat_async + + + + + + + + + +Calls the API to initiate a chat with a model using provided parameters + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +Which model to call, as a string or a types.Model. +
+`context` + +Text that should be provided to the model first, to ground the response. + +If not empty, this `context` will be given to the model first before the +`examples` and `messages`. + +This field can be a description of your prompt to the model to help provide +context and guide the responses. + +Examples: + +* "Translate the phrase from English to French." +* "Given a statement, classify the sentiment as happy, sad or neutral." + +Anything included in this field will take precedence over history in `messages` +if the total input size exceeds the model's Model.input_token_limit. +
+`examples` + +Examples of what the model should generate. + +This includes both the user input and the response that the model should +emulate. + +These `examples` are treated identically to conversation messages except +that they take precedence over the history in `messages`: +If the total input size exceeds the model's `input_token_limit` the input +will be truncated. Items will be dropped from `messages` before `examples` +
+`messages` + +A snapshot of the conversation history sorted chronologically. + +Turns alternate between two authors. + +If the total input size exceeds the model's `input_token_limit` the input +will be truncated: The oldest items will be dropped from `messages`. +
+`temperature` + +Controls the randomness of the output. Must be positive. + +Typical values are in the range: `[0.0,1.0]`. Higher values produce a +more random and varied response. A temperature of zero will be deterministic. +
+`candidate_count` + +The **maximum** number of generated response messages to return. + +This value must be between `[1, 8]`, inclusive. If unset, this +will default to `1`. + +Note: Only unique candidates are returned. Higher temperatures are more +likely to produce unique candidates. Setting `temperature=0.0` will always +return 1 candidate regardless of the `candidate_count`. +
+`top_k` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and +top-k sampling. + +`top_k` sets the maximum number of tokens to sample from on each step. +
+`top_p` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and +top-k sampling. + +`top_p` configures the nucleus sampling. It sets the maximum cumulative + probability of tokens to sample from. + + For example, if the sorted probabilities are + `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample + as `[0.625, 0.25, 0.125, 0, 0, 0]`. + + Typical values are in the `[0.9, 1.0]` range. +
+`prompt` + +You may pass a types.MessagePromptOptions **instead** of a +setting `context`/`examples`/`messages`, but not both. +
+`client` + +If you're not relying on the default client, you pass a +`glm.DiscussServiceClient` instead. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.ChatResponse containing the model's reply. +
+ diff --git a/docs/api/google/generativeai/configure.md b/docs/api/google/generativeai/configure.md new file mode 100644 index 000000000..f0b5f4006 --- /dev/null +++ b/docs/api/google/generativeai/configure.md @@ -0,0 +1,80 @@ +description: Captures default client configuration. + +
+ + +
+ +# google.generativeai.configure + + + + + + + + + +Captures default client configuration. + + + + + + + + +If no API key has been provided (either directly, or on `client_options`) and the +`GOOGLE_API_KEY` environment variable is set, it will be used as the API key. + +Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in +`google.ai.generativelanguage` for details on the other arguments. + + + + + + + + + + + + + + + + +
+`transport` + +A string, one of: [`rest`, `grpc`, `grpc_asyncio`]. +
+`api_key` + +The API-Key to use when creating the default clients (each service uses +a separate client). This is a shortcut for `client_options={"api_key": api_key}`. +If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be +used. +
+`default_metadata` + +Default (key, value) metadata pairs to send with every request. +when using `transport="rest"` these are sent as HTTP headers. +
+ diff --git a/docs/api/google/generativeai/count_message_tokens.md b/docs/api/google/generativeai/count_message_tokens.md new file mode 100644 index 000000000..7ec05db9b --- /dev/null +++ b/docs/api/google/generativeai/count_message_tokens.md @@ -0,0 +1,41 @@ +description: Calls the API to calculate the number of tokens used in the prompt. + +
+ + +
+ +# google.generativeai.count_message_tokens + + + + + + + + + +Calls the API to calculate the number of tokens used in the prompt. + + + + + + + diff --git a/docs/api/google/generativeai/count_text_tokens.md b/docs/api/google/generativeai/count_text_tokens.md new file mode 100644 index 000000000..a15f0f2aa --- /dev/null +++ b/docs/api/google/generativeai/count_text_tokens.md @@ -0,0 +1,37 @@ +description: Calls the API to count the number of tokens in the text prompt. + +
+ + +
+ +# google.generativeai.count_text_tokens + + + + + + + + + +Calls the API to count the number of tokens in the text prompt. + + + + + + + diff --git a/docs/api/google/generativeai/create_tuned_model.md b/docs/api/google/generativeai/create_tuned_model.md new file mode 100644 index 000000000..c12179164 --- /dev/null +++ b/docs/api/google/generativeai/create_tuned_model.md @@ -0,0 +1,198 @@ +description: Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress. + +
+ + +
+ +# google.generativeai.create_tuned_model + + + + + + + + + +Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress. + + + + + + + + +Since tuning a model can take significant time, this API doesn't wait for the tuning to complete. +Instead, it returns a `google.api_core.operation.Operation` object that lets you check on the +status of the tuning job, or wait for it to complete, and check the result. + +After the job completes you can either find the resulting `TunedModel` object in +`Operation.result()` or `palm.list_tuned_models` or `palm.get_tuned_model(model_id)`. + +``` +my_id = "my-tuned-model-id" +operation = palm.create_tuned_model( + id = my_id, + source_model="models/text-bison-001", + training_data=[{'text_input': 'example input', 'output': 'example output'},...] +) +tuned_model=operation.result() # Wait for tuning to finish + +palm.generate_text(f"tunedModels/{my_id}", prompt="...") +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`source_model` + +The name of the model to tune. +
+`training_data` + +The dataset to tune the model on. This must be either: +* A protos.Dataset, or +* An `Iterable` of: + *protos.TuningExample, + * `{'text_input': text_input, 'output': output}` dicts + * `(text_input, output)` tuples. +* A `Mapping` of `Iterable[str]` - use `input_key` and `output_key` to choose which + columns to use as the input/output +* A csv file (will be read with `pd.read_csv` and handles as a `Mapping` + above). This can be: + * A local path as a `str` or `pathlib.Path`. + * A url for a csv file. + * The url of a Google Sheets file. +* A JSON file - Its contents will be handled either as an `Iterable` or `Mapping` + above. This can be: + * A local path as a `str` or `pathlib.Path`. +
+`id` + +The model identifier, used to refer to the model in the API +`tunedModels/{id}`. Must be unique. +
+`display_name` + +A human-readable name for display. +
+`description` + +A description of the tuned model. +
+`temperature` + +The default temperature for the tuned model, see types.Model for details. +
+`top_p` + +The default `top_p` for the model, see types.Model for details. +
+`top_k` + +The default `top_k` for the model, see types.Model for details. +
+`epoch_count` + +The number of tuning epochs to run. An epoch is a pass over the whole dataset. +
+`batch_size` + +The number of examples to use in each training batch. +
+`learning_rate` + +The step size multiplier for the gradient updates. +
+`client` + +Which client to use. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A [`google.api_core.operation.Operation`](https://googleapis.dev/python/google-api-core/latest/operation.html) +
+ diff --git a/docs/api/google/generativeai/delete_file.md b/docs/api/google/generativeai/delete_file.md new file mode 100644 index 000000000..1098c3afb --- /dev/null +++ b/docs/api/google/generativeai/delete_file.md @@ -0,0 +1,34 @@ +description: Calls the API to permanently delete a specified file using a supported file service. + +
+ + +
+ +# google.generativeai.delete_file + + + + + + + + + +Calls the API to permanently delete a specified file using a supported file service. + + + + + + + diff --git a/docs/api/google/generativeai/delete_tuned_model.md b/docs/api/google/generativeai/delete_tuned_model.md new file mode 100644 index 000000000..d5c4fa089 --- /dev/null +++ b/docs/api/google/generativeai/delete_tuned_model.md @@ -0,0 +1,36 @@ +description: Calls the API to delete a specified tuned model + +
+ + +
+ +# google.generativeai.delete_tuned_model + + + + + + + + + +Calls the API to delete a specified tuned model + + + + + + + diff --git a/docs/api/google/generativeai/embed_content.md b/docs/api/google/generativeai/embed_content.md new file mode 100644 index 000000000..e6cb45c6f --- /dev/null +++ b/docs/api/google/generativeai/embed_content.md @@ -0,0 +1,112 @@ +description: Calls the API to create embeddings for content passed in. + +
+ + +
+ +# google.generativeai.embed_content + + + + + + + + + +Calls the API to create embeddings for content passed in. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + + Which [model](https://ai.google.dev/models/gemini#embedding) to +call, as a string or a types.Model. +
+`content` + + Content to embed. +
+`task_type` + + Optional task type for which the embeddings will be used. Can only +be set for `models/embedding-001`. +
+`title` + + An optional title for the text. Only applicable when task_type is +`RETRIEVAL_DOCUMENT`. +
+`output_dimensionality` + + Optional reduced dimensionality for the output embeddings. If set, +excessive values from the output embeddings will be truncated from +the end. +
+`request_options` + + Options for the request. +
+ + + + + + + + + + + +
+Dictionary containing the embedding (list of float values) for the +input content. +
+ diff --git a/docs/api/google/generativeai/embed_content_async.md b/docs/api/google/generativeai/embed_content_async.md new file mode 100644 index 000000000..bbf132fd1 --- /dev/null +++ b/docs/api/google/generativeai/embed_content_async.md @@ -0,0 +1,40 @@ +description: Calls the API to create async embeddings for content passed in. + +
+ + +
+ +# google.generativeai.embed_content_async + + + + + + + + + +Calls the API to create async embeddings for content passed in. + + + + + + + diff --git a/docs/api/google/generativeai/generate_embeddings.md b/docs/api/google/generativeai/generate_embeddings.md new file mode 100644 index 000000000..9d1fd8beb --- /dev/null +++ b/docs/api/google/generativeai/generate_embeddings.md @@ -0,0 +1,90 @@ +description: Calls the API to create an embedding for the text passed in. + +
+ + +
+ +# google.generativeai.generate_embeddings + + + + + + + + + +Calls the API to create an embedding for the text passed in. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +Which model to call, as a string or a types.Model. +
+`text` + +Free-form input text given to the model. Given a string, the model will +generate an embedding based on the input text. +
+`client` + +If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+Dictionary containing the embedding (list of float values) for the input text. +
+ diff --git a/docs/api/google/generativeai/generate_text.md b/docs/api/google/generativeai/generate_text.md new file mode 100644 index 000000000..91225fd70 --- /dev/null +++ b/docs/api/google/generativeai/generate_text.md @@ -0,0 +1,172 @@ +description: Calls the API to generate text based on the provided prompt. + +
+ + +
+ +# google.generativeai.generate_text + + + + + + + + + +Calls the API to generate text based on the provided prompt. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +Which model to call, as a string or a types.Model. +
+`prompt` + +Free-form input text given to the model. Given a prompt, the model will +generate text that completes the input text. +
+`temperature` + +Controls the randomness of the output. Must be positive. +Typical values are in the range: `[0.0,1.0]`. Higher values produce a +more random and varied response. A temperature of zero will be deterministic. +
+`candidate_count` + +The **maximum** number of generated response messages to return. +This value must be between `[1, 8]`, inclusive. If unset, this +will default to `1`. + +Note: Only unique candidates are returned. Higher temperatures are more +likely to produce unique candidates. Setting `temperature=0.0` will always +return 1 candidate regardless of the `candidate_count`. +
+`max_output_tokens` + +Maximum number of tokens to include in a candidate. Must be greater +than zero. If unset, will default to 64. +
+`top_k` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling. +`top_k` sets the maximum number of tokens to sample from on each step. +
+`top_p` + +The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling. +`top_p` configures the nucleus sampling. It sets the maximum cumulative +probability of tokens to sample from. +For example, if the sorted probabilities are +`[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample +as `[0.625, 0.25, 0.125, 0, 0, 0]`. +
+`safety_settings` + +A list of unique `types.SafetySetting` instances for blocking unsafe content. +These will be enforced on the `prompt` and +`candidates`. There should not be more than one +setting for each `types.SafetyCategory` type. The API will block any prompts and +responses that fail to meet the thresholds set by these settings. This list +overrides the default settings for each `SafetyCategory` specified in the +safety_settings. If there is no `types.SafetySetting` for a given +`SafetyCategory` provided in the list, the API will use the default safety +setting for that category. +
+`stop_sequences` + +A set of up to 5 character sequences that will stop output generation. +If specified, the API will stop at the first appearance of a stop +sequence. The stop sequence will not be included as part of the response. +
+`client` + +If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.Completion containing the model's text completion response. +
+ diff --git a/docs/api/google/generativeai/get_base_model.md b/docs/api/google/generativeai/get_base_model.md new file mode 100644 index 000000000..a71ef12f5 --- /dev/null +++ b/docs/api/google/generativeai/get_base_model.md @@ -0,0 +1,87 @@ +description: Calls the API to fetch a base model by name. + +
+ + +
+ +# google.generativeai.get_base_model + + + + + + + + + +Calls the API to fetch a base model by name. + + + + + + + + +``` +import pprint +model = genai.get_base_model('models/chat-bison-001') +pprint.pprint(model) +``` + + + + + + + + + + + + + + + + +
+`name` + +The name of the model to fetch. Should start with `models/` +
+`client` + +The client to use. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.Model. +
+ diff --git a/docs/api/google/generativeai/get_file.md b/docs/api/google/generativeai/get_file.md new file mode 100644 index 000000000..5377161be --- /dev/null +++ b/docs/api/google/generativeai/get_file.md @@ -0,0 +1,34 @@ +description: Calls the API to retrieve a specified file using a supported file service. + +
+ + +
+ +# google.generativeai.get_file + + + + + + + + + +Calls the API to retrieve a specified file using a supported file service. + + + + + + + diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md new file mode 100644 index 000000000..e488dbfaa --- /dev/null +++ b/docs/api/google/generativeai/get_model.md @@ -0,0 +1,87 @@ +description: Calls the API to fetch a model by name. + +
+ + +
+ +# google.generativeai.get_model + + + + + + + + + +Calls the API to fetch a model by name. + + + + + + + + +``` +import pprint +model = genai.get_model('models/gemini-pro') +pprint.pprint(model) +``` + + + + + + + + + + + + + + + + +
+`name` + +The name of the model to fetch. Should start with `models/` +
+`client` + +The client to use. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.Model +
+ diff --git a/docs/api/google/generativeai/get_operation.md b/docs/api/google/generativeai/get_operation.md new file mode 100644 index 000000000..74bb706e6 --- /dev/null +++ b/docs/api/google/generativeai/get_operation.md @@ -0,0 +1,34 @@ +description: Calls the API to get a specific operation + +
+ + +
+ +# google.generativeai.get_operation + + + + + + + + + +Calls the API to get a specific operation + + + + + + + diff --git a/docs/api/google/generativeai/get_tuned_model.md b/docs/api/google/generativeai/get_tuned_model.md new file mode 100644 index 000000000..f420a3beb --- /dev/null +++ b/docs/api/google/generativeai/get_tuned_model.md @@ -0,0 +1,87 @@ +description: Calls the API to fetch a tuned model by name. + +
+ + +
+ +# google.generativeai.get_tuned_model + + + + + + + + + +Calls the API to fetch a tuned model by name. + + + + + + + + +``` +import pprint +model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001') +pprint.pprint(model) +``` + + + + + + + + + + + + + + + + +
+`name` + +The name of the model to fetch. Should start with `tunedModels/` +
+`client` + +The client to use. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+A types.TunedModel. +
+ diff --git a/docs/api/google/generativeai/list_files.md b/docs/api/google/generativeai/list_files.md new file mode 100644 index 000000000..ccb3cb453 --- /dev/null +++ b/docs/api/google/generativeai/list_files.md @@ -0,0 +1,34 @@ +description: Calls the API to list files using a supported file service. + +
+ + +
+ +# google.generativeai.list_files + + + + + + + + + +Calls the API to list files using a supported file service. + + + + + + + diff --git a/docs/api/google/generativeai/list_models.md b/docs/api/google/generativeai/list_models.md new file mode 100644 index 000000000..5d19f917b --- /dev/null +++ b/docs/api/google/generativeai/list_models.md @@ -0,0 +1,87 @@ +description: Calls the API to list all available models. + +
+ + +
+ +# google.generativeai.list_models + + + + + + + + + +Calls the API to list all available models. + + + + + + + + +``` +import pprint +for model in genai.list_models(): + pprint.pprint(model) +``` + + + + + + + + + + + + + + + + +
+`page_size` + +How many `types.Models` to fetch per page (api call). +
+`client` + +You may pass a `glm.ModelServiceClient` instead of using the default client. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+types.Model objects. +
+ diff --git a/docs/api/google/generativeai/list_operations.md b/docs/api/google/generativeai/list_operations.md new file mode 100644 index 000000000..bfcd6f641 --- /dev/null +++ b/docs/api/google/generativeai/list_operations.md @@ -0,0 +1,34 @@ +description: Calls the API to list all operations + +
+ + +
+ +# google.generativeai.list_operations + + + + + + + + + +Calls the API to list all operations + + + + + + + diff --git a/docs/api/google/generativeai/list_tuned_models.md b/docs/api/google/generativeai/list_tuned_models.md new file mode 100644 index 000000000..07306d3c7 --- /dev/null +++ b/docs/api/google/generativeai/list_tuned_models.md @@ -0,0 +1,87 @@ +description: Calls the API to list all tuned models. + +
+ + +
+ +# google.generativeai.list_tuned_models + + + + + + + + + +Calls the API to list all tuned models. + + + + + + + + +``` +import pprint +for model in genai.list_tuned_models(): + pprint.pprint(model) +``` + + + + + + + + + + + + + + + + +
+`page_size` + +How many `types.Models` to fetch per page (api call). +
+`client` + +You may pass a `glm.ModelServiceClient` instead of using the default client. +
+`request_options` + +Options for the request. +
+ + + + + + + + + + + +
+types.TunedModel objects. +
+ diff --git a/docs/api/google/generativeai/protos.md b/docs/api/google/generativeai/protos.md new file mode 100644 index 000000000..8ab8c8efc --- /dev/null +++ b/docs/api/google/generativeai/protos.md @@ -0,0 +1,368 @@ +description: This module provides low level access to the ProtoBuffer "Message" classes used by the API. + +
+ + +
+ +# Module: google.generativeai.protos + + + + + + + + + +This module provides low level access to the ProtoBuffer "Message" classes used by the API. + + +**For typical usage of this SDK you do not need to use any of these classes.** + +ProtoBufers are Google API's serilization format. They are strongly typed and efficient. + +The `genai` SDK tries to be permissive about what objects it will accept from a user, but in the end +the SDK always converts input to an appropriate Proto Message object to send as the request. Each API request +has a `*Request` and `*Response` Message defined here. + +If you have any uncertainty about what the API may accept or return, these classes provide the +complete/unambiguous answer. They come from the `google-ai-generativelanguage` package which is +generated from a snapshot of the API definition. + +``` +>>> from google.generativeai import protos +>>> import inspect +>>> print(inspect.getsource(protos.Part)) +``` + +Proto classes can have "oneof" fields. Use `in` to check which `oneof` field is set. 
+ +``` +>>> p = protos.Part(text='hello') +>>> 'text' in p +True +>>> p.inline_data = {'mime_type':'image/png', 'data': b'PNG'} +>>> type(p.inline_data) is protos.Blob +True +>>> 'inline_data' in p +True +>>> 'text' in p +False +``` + +Instances of all Message classes can be converted into JSON compatible dictionaries with the following construct +(Bytes are base64 encoded): + +``` +>>> p_dict = type(p).to_dict(p) +>>> p_dict +{'inline_data': {'mime_type': 'image/png', 'data': 'UE5H'}} +``` + +A compatible dict can be converted to an instance of a Message class by passing it as the first argument to the +constructor: + +``` +>>> p = protos.Part(p_dict) +inline_data { + mime_type: "image/png" + data: "PNG" +} +``` + +Note when converting that `to_dict` accepts additional arguments: + +- `use_integers_for_enums:bool = True`, Set it to `False` to replace enum int values with their string + names in the output +- ` including_default_value_fields:bool = True`, Set it to `False` to reduce the verbosity of the output. + +Additional arguments are described in the docstring: + +``` +>>> help(proto.Part.to_dict) +``` + +## Classes + +[`class AttributionSourceId`](../../google/generativeai/protos/AttributionSourceId.md): Identifier for the source contributing to this attribution. + +[`class BatchCreateChunksRequest`](../../google/generativeai/protos/BatchCreateChunksRequest.md): Request to batch create ``Chunk``\ s. + +[`class BatchCreateChunksResponse`](../../google/generativeai/protos/BatchCreateChunksResponse.md): Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s. + +[`class BatchDeleteChunksRequest`](../../google/generativeai/protos/BatchDeleteChunksRequest.md): Request to batch delete ``Chunk``\ s. + +[`class BatchEmbedContentsRequest`](../../google/generativeai/protos/BatchEmbedContentsRequest.md): Batch request to get embeddings from the model for a list of prompts. 
+ +[`class BatchEmbedContentsResponse`](../../google/generativeai/protos/BatchEmbedContentsResponse.md): The response to a ``BatchEmbedContentsRequest``. + +[`class BatchEmbedTextRequest`](../../google/generativeai/protos/BatchEmbedTextRequest.md): Batch request to get a text embedding from the model. + +[`class BatchEmbedTextResponse`](../../google/generativeai/protos/BatchEmbedTextResponse.md): The response to a EmbedTextRequest. + +[`class BatchUpdateChunksRequest`](../../google/generativeai/protos/BatchUpdateChunksRequest.md): Request to batch update ``Chunk``\ s. + +[`class BatchUpdateChunksResponse`](../../google/generativeai/protos/BatchUpdateChunksResponse.md): Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s. + +[`class Blob`](../../google/generativeai/protos/Blob.md): Raw media bytes. + +[`class CachedContent`](../../google/generativeai/protos/CachedContent.md): Content that has been preprocessed and can be used in subsequent request to GenerativeService. + +[`class Candidate`](../../google/generativeai/protos/Candidate.md): A response candidate generated from the model. + +[`class Chunk`](../../google/generativeai/protos/Chunk.md): A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage. + +[`class ChunkData`](../../google/generativeai/protos/ChunkData.md): Extracted data that represents the ``Chunk`` content. + +[`class CitationMetadata`](../../google/generativeai/protos/CitationMetadata.md): A collection of source attributions for a piece of content. + +[`class CitationSource`](../../google/generativeai/protos/CitationSource.md): A citation to a source for a portion of a specific response. + +[`class CodeExecution`](../../google/generativeai/protos/CodeExecution.md): Tool that executes code generated by the model, and automatically returns the result to the model. 
+ +[`class CodeExecutionResult`](../../google/generativeai/protos/CodeExecutionResult.md): Result of executing the ``ExecutableCode``. + +[`class Condition`](../../google/generativeai/protos/Condition.md): Filter condition applicable to a single key. + +[`class Content`](../../google/generativeai/protos/Content.md): The base structured datatype containing multi-part content of a message. + +[`class ContentEmbedding`](../../google/generativeai/protos/ContentEmbedding.md): A list of floats representing an embedding. + +[`class ContentFilter`](../../google/generativeai/protos/ContentFilter.md): Content filtering metadata associated with processing a single request. + +[`class Corpus`](../../google/generativeai/protos/Corpus.md): A ``Corpus`` is a collection of ``Document``\ s. + +[`class CountMessageTokensRequest`](../../google/generativeai/protos/CountMessageTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model. + +[`class CountMessageTokensResponse`](../../google/generativeai/protos/CountMessageTokensResponse.md): A response from ``CountMessageTokens``. + +[`class CountTextTokensRequest`](../../google/generativeai/protos/CountTextTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model. + +[`class CountTextTokensResponse`](../../google/generativeai/protos/CountTextTokensResponse.md): A response from ``CountTextTokens``. + +[`class CountTokensRequest`](../../google/generativeai/protos/CountTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model. + +[`class CountTokensResponse`](../../google/generativeai/protos/CountTokensResponse.md): A response from ``CountTokens``. + +[`class CreateCachedContentRequest`](../../google/generativeai/protos/CreateCachedContentRequest.md): Request to create CachedContent. + +[`class CreateChunkRequest`](../../google/generativeai/protos/CreateChunkRequest.md): Request to create a ``Chunk``. 
+ +[`class CreateCorpusRequest`](../../google/generativeai/protos/CreateCorpusRequest.md): Request to create a ``Corpus``. + +[`class CreateDocumentRequest`](../../google/generativeai/protos/CreateDocumentRequest.md): Request to create a ``Document``. + +[`class CreateFileRequest`](../../google/generativeai/protos/CreateFileRequest.md): Request for ``CreateFile``. + +[`class CreateFileResponse`](../../google/generativeai/protos/CreateFileResponse.md): Response for ``CreateFile``. + +[`class CreatePermissionRequest`](../../google/generativeai/protos/CreatePermissionRequest.md): Request to create a ``Permission``. + +[`class CreateTunedModelMetadata`](../../google/generativeai/protos/CreateTunedModelMetadata.md): Metadata about the state and progress of creating a tuned model returned from the long-running operation + +[`class CreateTunedModelRequest`](../../google/generativeai/protos/CreateTunedModelRequest.md): Request to create a TunedModel. + +[`class CustomMetadata`](../../google/generativeai/protos/CustomMetadata.md): User provided metadata stored as key-value pairs. + +[`class Dataset`](../../google/generativeai/protos/Dataset.md): Dataset for training or validation. + +[`class DeleteCachedContentRequest`](../../google/generativeai/protos/DeleteCachedContentRequest.md): Request to delete CachedContent. + +[`class DeleteChunkRequest`](../../google/generativeai/protos/DeleteChunkRequest.md): Request to delete a ``Chunk``. + +[`class DeleteCorpusRequest`](../../google/generativeai/protos/DeleteCorpusRequest.md): Request to delete a ``Corpus``. + +[`class DeleteDocumentRequest`](../../google/generativeai/protos/DeleteDocumentRequest.md): Request to delete a ``Document``. + +[`class DeleteFileRequest`](../../google/generativeai/protos/DeleteFileRequest.md): Request for ``DeleteFile``. + +[`class DeletePermissionRequest`](../../google/generativeai/protos/DeletePermissionRequest.md): Request to delete the ``Permission``. 
+ +[`class DeleteTunedModelRequest`](../../google/generativeai/protos/DeleteTunedModelRequest.md): Request to delete a TunedModel. + +[`class Document`](../../google/generativeai/protos/Document.md): A ``Document`` is a collection of ``Chunk``\ s. + +[`class EmbedContentRequest`](../../google/generativeai/protos/EmbedContentRequest.md): Request containing the ``Content`` for the model to embed. + +[`class EmbedContentResponse`](../../google/generativeai/protos/EmbedContentResponse.md): The response to an ``EmbedContentRequest``. + +[`class EmbedTextRequest`](../../google/generativeai/protos/EmbedTextRequest.md): Request to get a text embedding from the model. + +[`class EmbedTextResponse`](../../google/generativeai/protos/EmbedTextResponse.md): The response to a EmbedTextRequest. + +[`class Embedding`](../../google/generativeai/protos/Embedding.md): A list of floats representing the embedding. + +[`class Example`](../../google/generativeai/protos/Example.md): An input/output example used to instruct the Model. + +[`class ExecutableCode`](../../google/generativeai/protos/ExecutableCode.md): Code generated by the model that is meant to be executed, and the result returned to the model. + +[`class File`](../../google/generativeai/protos/File.md): A file uploaded to the API. + +[`class FileData`](../../google/generativeai/protos/FileData.md): URI based data. + +[`class FunctionCall`](../../google/generativeai/protos/FunctionCall.md): A predicted ``FunctionCall`` returned from the model that contains a string representing the
FunctionDeclaration.name with the arguments and their values.
+
+[`class FunctionCallingConfig`](../../google/generativeai/protos/FunctionCallingConfig.md): Configuration for specifying function calling behavior.
+
+[`class FunctionDeclaration`](../../google/generativeai/protos/FunctionDeclaration.md): Structured representation of a function declaration as defined by the `OpenAPI 3.0.3 specification <https://spec.openapis.org/oas/v3.0.3>`__.
+
+[`class FunctionResponse`](../../google/generativeai/protos/FunctionResponse.md): The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model.
+
+[`class GenerateAnswerRequest`](../../google/generativeai/protos/GenerateAnswerRequest.md): Request to generate a grounded answer from the model.
+
+[`class GenerateAnswerResponse`](../../google/generativeai/protos/GenerateAnswerResponse.md): Response from the model for a grounded answer.
+
+[`class GenerateContentRequest`](../../google/generativeai/protos/GenerateContentRequest.md): Request to generate a completion from the model.
+
+[`class GenerateContentResponse`](../../google/generativeai/protos/GenerateContentResponse.md): Response from the model supporting multiple candidates.
+
+[`class GenerateMessageRequest`](../../google/generativeai/protos/GenerateMessageRequest.md): Request to generate a message response from the model.
+
+[`class GenerateMessageResponse`](../../google/generativeai/protos/GenerateMessageResponse.md): The response from the model.
+
+[`class GenerateTextRequest`](../../google/generativeai/protos/GenerateTextRequest.md): Request to generate a text completion response from the model.
+
+[`class GenerateTextResponse`](../../google/generativeai/protos/GenerateTextResponse.md): The response from the model, including candidate completions. 
+ +[`class GenerationConfig`](../../google/generativeai/protos/GenerationConfig.md): Configuration options for model generation and outputs. + +[`class GetCachedContentRequest`](../../google/generativeai/protos/GetCachedContentRequest.md): Request to read CachedContent. + +[`class GetChunkRequest`](../../google/generativeai/protos/GetChunkRequest.md): Request for getting information about a specific ``Chunk``. + +[`class GetCorpusRequest`](../../google/generativeai/protos/GetCorpusRequest.md): Request for getting information about a specific ``Corpus``. + +[`class GetDocumentRequest`](../../google/generativeai/protos/GetDocumentRequest.md): Request for getting information about a specific ``Document``. + +[`class GetFileRequest`](../../google/generativeai/protos/GetFileRequest.md): Request for ``GetFile``. + +[`class GetModelRequest`](../../google/generativeai/protos/GetModelRequest.md): Request for getting information about a specific Model. + +[`class GetPermissionRequest`](../../google/generativeai/protos/GetPermissionRequest.md): Request for getting information about a specific ``Permission``. + +[`class GetTunedModelRequest`](../../google/generativeai/protos/GetTunedModelRequest.md): Request for getting information about a specific Model. + +[`class GroundingAttribution`](../../google/generativeai/protos/GroundingAttribution.md): Attribution for a source that contributed to an answer. + +[`class GroundingPassage`](../../google/generativeai/protos/GroundingPassage.md): Passage included inline with a grounding configuration. + +[`class GroundingPassages`](../../google/generativeai/protos/GroundingPassages.md): A repeated list of passages. + +[`class HarmCategory`](../../google/generativeai/protos/HarmCategory.md): The category of a rating. + +[`class Hyperparameters`](../../google/generativeai/protos/Hyperparameters.md): Hyperparameters controlling the tuning process. 
+ +[`class ListCachedContentsRequest`](../../google/generativeai/protos/ListCachedContentsRequest.md): Request to list CachedContents. + +[`class ListCachedContentsResponse`](../../google/generativeai/protos/ListCachedContentsResponse.md): Response with CachedContents list. + +[`class ListChunksRequest`](../../google/generativeai/protos/ListChunksRequest.md): Request for listing ``Chunk``\ s. + +[`class ListChunksResponse`](../../google/generativeai/protos/ListChunksResponse.md): Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s. + +[`class ListCorporaRequest`](../../google/generativeai/protos/ListCorporaRequest.md): Request for listing ``Corpora``. + +[`class ListCorporaResponse`](../../google/generativeai/protos/ListCorporaResponse.md): Response from ``ListCorpora`` containing a paginated list of ``Corpora``. + +[`class ListDocumentsRequest`](../../google/generativeai/protos/ListDocumentsRequest.md): Request for listing ``Document``\ s. + +[`class ListDocumentsResponse`](../../google/generativeai/protos/ListDocumentsResponse.md): Response from ``ListDocuments`` containing a paginated list of ``Document``\ s. + +[`class ListFilesRequest`](../../google/generativeai/protos/ListFilesRequest.md): Request for ``ListFiles``. + +[`class ListFilesResponse`](../../google/generativeai/protos/ListFilesResponse.md): Response for ``ListFiles``. + +[`class ListModelsRequest`](../../google/generativeai/protos/ListModelsRequest.md): Request for listing all Models. + +[`class ListModelsResponse`](../../google/generativeai/protos/ListModelsResponse.md): Response from ``ListModel`` containing a paginated list of Models. + +[`class ListPermissionsRequest`](../../google/generativeai/protos/ListPermissionsRequest.md): Request for listing permissions. + +[`class ListPermissionsResponse`](../../google/generativeai/protos/ListPermissionsResponse.md): Response from ``ListPermissions`` containing a paginated list of permissions. 
+ +[`class ListTunedModelsRequest`](../../google/generativeai/protos/ListTunedModelsRequest.md): Request for listing TunedModels. + +[`class ListTunedModelsResponse`](../../google/generativeai/protos/ListTunedModelsResponse.md): Response from ``ListTunedModels`` containing a paginated list of Models. + +[`class Message`](../../google/generativeai/protos/Message.md): The base unit of structured text. + +[`class MessagePrompt`](../../google/generativeai/protos/MessagePrompt.md): All of the structured input text passed to the model as a prompt. + +[`class MetadataFilter`](../../google/generativeai/protos/MetadataFilter.md): User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values. + +[`class Model`](../../google/generativeai/protos/Model.md): Information about a Generative Language Model. + +[`class Part`](../../google/generativeai/protos/Part.md): A datatype containing media that is part of a multi-part ``Content`` message. + +[`class Permission`](../../google/generativeai/protos/Permission.md): Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. + +[`class QueryCorpusRequest`](../../google/generativeai/protos/QueryCorpusRequest.md): Request for querying a ``Corpus``. + +[`class QueryCorpusResponse`](../../google/generativeai/protos/QueryCorpusResponse.md): Response from ``QueryCorpus`` containing a list of relevant chunks. + +[`class QueryDocumentRequest`](../../google/generativeai/protos/QueryDocumentRequest.md): Request for querying a ``Document``. + +[`class QueryDocumentResponse`](../../google/generativeai/protos/QueryDocumentResponse.md): Response from ``QueryDocument`` containing a list of relevant chunks. + +[`class RelevantChunk`](../../google/generativeai/protos/RelevantChunk.md): The information for a chunk relevant to a query. + +[`class SafetyFeedback`](../../google/generativeai/protos/SafetyFeedback.md): Safety feedback for an entire request. 
+ +[`class SafetyRating`](../../google/generativeai/protos/SafetyRating.md): Safety rating for a piece of content. + +[`class SafetySetting`](../../google/generativeai/protos/SafetySetting.md): Safety setting, affecting the safety-blocking behavior. + +[`class Schema`](../../google/generativeai/protos/Schema.md): The ``Schema`` object allows the definition of input and output data types. + +[`class SemanticRetrieverConfig`](../../google/generativeai/protos/SemanticRetrieverConfig.md): Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API. + +[`class StringList`](../../google/generativeai/protos/StringList.md): User provided string values assigned to a single metadata key. + +[`class TaskType`](../../google/generativeai/protos/TaskType.md): Type of task for which the embedding will be used. + +[`class TextCompletion`](../../google/generativeai/protos/TextCompletion.md): Output text returned from a model. + +[`class TextPrompt`](../../google/generativeai/protos/TextPrompt.md): Text given to the model as a prompt. + +[`class Tool`](../../google/generativeai/protos/Tool.md): Tool details that the model may use to generate response. + +[`class ToolConfig`](../../google/generativeai/protos/ToolConfig.md): The Tool configuration containing parameters for specifying ``Tool`` use in the request. + +[`class TransferOwnershipRequest`](../../google/generativeai/protos/TransferOwnershipRequest.md): Request to transfer the ownership of the tuned model. + +[`class TransferOwnershipResponse`](../../google/generativeai/protos/TransferOwnershipResponse.md): Response from ``TransferOwnership``. + +[`class TunedModel`](../../google/generativeai/protos/TunedModel.md): A fine-tuned model created using ModelService.CreateTunedModel. + +[`class TunedModelSource`](../../google/generativeai/protos/TunedModelSource.md): Tuned model as a source for training a new model. 
+ +[`class TuningExample`](../../google/generativeai/protos/TuningExample.md): A single example for tuning. + +[`class TuningExamples`](../../google/generativeai/protos/TuningExamples.md): A set of tuning examples. Can be training or validation data. + +[`class TuningSnapshot`](../../google/generativeai/protos/TuningSnapshot.md): Record for a single tuning step. + +[`class TuningTask`](../../google/generativeai/protos/TuningTask.md): Tuning tasks that create tuned models. + +[`class Type`](../../google/generativeai/protos/Type.md): Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types + +[`class UpdateCachedContentRequest`](../../google/generativeai/protos/UpdateCachedContentRequest.md): Request to update CachedContent. + +[`class UpdateChunkRequest`](../../google/generativeai/protos/UpdateChunkRequest.md): Request to update a ``Chunk``. + +[`class UpdateCorpusRequest`](../../google/generativeai/protos/UpdateCorpusRequest.md): Request to update a ``Corpus``. + +[`class UpdateDocumentRequest`](../../google/generativeai/protos/UpdateDocumentRequest.md): Request to update a ``Document``. + +[`class UpdatePermissionRequest`](../../google/generativeai/protos/UpdatePermissionRequest.md): Request to update the ``Permission``. + +[`class UpdateTunedModelRequest`](../../google/generativeai/protos/UpdateTunedModelRequest.md): Request to update a TunedModel. + +[`class VideoMetadata`](../../google/generativeai/protos/VideoMetadata.md): Metadata for a video ``File``. + diff --git a/docs/api/google/generativeai/protos/AttributionSourceId.md b/docs/api/google/generativeai/protos/AttributionSourceId.md new file mode 100644 index 000000000..bdc426825 --- /dev/null +++ b/docs/api/google/generativeai/protos/AttributionSourceId.md @@ -0,0 +1,73 @@ +description: Identifier for the source contributing to this attribution. + +
+ + + + +
+ +# google.generativeai.protos.AttributionSourceId + + + + + + + + + +Identifier for the source contributing to this attribution. + + + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + +
+`grounding_passage` + +`google.ai.generativelanguage.AttributionSourceId.GroundingPassageId` + +Identifier for an inline passage. + +This field is a member of `oneof`_ ``source``. +
+`semantic_retriever_chunk` + +`google.ai.generativelanguage.AttributionSourceId.SemanticRetrieverChunk` + +Identifier for a ``Chunk`` fetched via Semantic Retriever. + +This field is a member of `oneof`_ ``source``. +
+ + + +## Child Classes +[`class GroundingPassageId`](../../../google/generativeai/protos/AttributionSourceId/GroundingPassageId.md) + +[`class SemanticRetrieverChunk`](../../../google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md) + diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md new file mode 100644 index 000000000..0e55174a0 --- /dev/null +++ b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md @@ -0,0 +1,59 @@ +description: Identifier for a part within a GroundingPassage. + +
+ + +
+ +# google.generativeai.protos.AttributionSourceId.GroundingPassageId + + + + + + + + + +Identifier for a part within a ``GroundingPassage``. + + + + + + + + + + + + + + + + + + +
+`passage_id` + +`str` + +Output only. ID of the passage matching the +``GenerateAnswerRequest``'s GroundingPassage.id. +
+`part_index` + +`int` + +Output only. Index of the part within the +``GenerateAnswerRequest``'s GroundingPassage.content. +
+ + + diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md new file mode 100644 index 000000000..dab874e2e --- /dev/null +++ b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md @@ -0,0 +1,60 @@ +description: Identifier for a Chunk retrieved via Semantic Retriever specified in the GenerateAnswerRequest using SemanticRetrieverConfig. + +
+ + +
+ +# google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk + + + + + + + + + +Identifier for a ``Chunk`` retrieved via Semantic Retriever specified in the ``GenerateAnswerRequest`` using ``SemanticRetrieverConfig``. + + + + + + + + + + + + + + + + + + +
+`source` + +`str` + +Output only. Name of the source matching the request's +SemanticRetrieverConfig.source. Example: ``corpora/123`` +or ``corpora/123/documents/abc`` +
+`chunk` + +`str` + +Output only. Name of the ``Chunk`` containing the attributed +text. Example: ``corpora/123/documents/abc/chunks/xyz`` +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md new file mode 100644 index 000000000..4508763a9 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md @@ -0,0 +1,62 @@ +description: Request to batch create Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.BatchCreateChunksRequest + + + + + + + + + +Request to batch create ``Chunk``\ s. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Optional. The name of the ``Document`` where this batch of +``Chunk``\ s will be created. The parent field in every +``CreateChunkRequest`` must match this value. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`requests` + +`MutableSequence[google.ai.generativelanguage.CreateChunkRequest]` + +Required. The request messages specifying the ``Chunk``\ s +to create. A maximum of 100 ``Chunk``\ s can be created in a +batch. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md new file mode 100644 index 000000000..c5c7315cf --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md @@ -0,0 +1,48 @@ +description: Response from BatchCreateChunks containing a list of created Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.BatchCreateChunksResponse + + + + + + + + + +Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s. + + + + + + + + + + + + + + + +
+`chunks` + +`MutableSequence[google.ai.generativelanguage.Chunk]` + +``Chunk``\ s created. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md new file mode 100644 index 000000000..da61fbb11 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md @@ -0,0 +1,61 @@ +description: Request to batch delete Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.BatchDeleteChunksRequest + + + + + + + + + +Request to batch delete ``Chunk``\ s. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Optional. The name of the ``Document`` containing the +``Chunk``\ s to delete. The parent field in every +``DeleteChunkRequest`` must match this value. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`requests` + +`MutableSequence[google.ai.generativelanguage.DeleteChunkRequest]` + +Required. The request messages specifying the ``Chunk``\ s +to delete. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md new file mode 100644 index 000000000..229a8899a --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md @@ -0,0 +1,65 @@ +description: Batch request to get embeddings from the model for a list of prompts. + +
+ + +
+ +# google.generativeai.protos.BatchEmbedContentsRequest + + + + + + + + + +Batch request to get embeddings from the model for a list of prompts. + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model's resource name. This serves as an ID +for the Model to use. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+`requests` + +`MutableSequence[google.ai.generativelanguage.EmbedContentRequest]` + +Required. Embed requests for the batch. The model in each of +these requests must match the model specified +BatchEmbedContentsRequest.model. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md new file mode 100644 index 000000000..a4fc7b328 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md @@ -0,0 +1,50 @@ +description: The response to a BatchEmbedContentsRequest. + +
+ + +
+ +# google.generativeai.protos.BatchEmbedContentsResponse + + + + + + + + + +The response to a ``BatchEmbedContentsRequest``. + + + + + + + + + + + + + + + +
+`embeddings` + +`MutableSequence[google.ai.generativelanguage.ContentEmbedding]` + +Output only. The embeddings for each request, +in the same order as provided in the batch +request. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md new file mode 100644 index 000000000..729ca1683 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md @@ -0,0 +1,71 @@ +description: Batch request to get a text embedding from the model. + +
+ + +
+ +# google.generativeai.protos.BatchEmbedTextRequest + + + + + + + + + +Batch request to get a text embedding from the model. + + + + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The name of the ``Model`` to use for generating +the embedding. Examples: models/embedding-gecko-001 +
+`texts` + +`MutableSequence[str]` + +Optional. The free-form input texts that the +model will turn into an embedding. The current +limit is 100 texts, over which an error will be +thrown. +
+`requests` + +`MutableSequence[google.ai.generativelanguage.EmbedTextRequest]` + +Optional. Embed requests for the batch. Only one of +``texts`` or ``requests`` can be set. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md new file mode 100644 index 000000000..852411097 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md @@ -0,0 +1,49 @@ +description: The response to a EmbedTextRequest. + +
+ + +
+ +# google.generativeai.protos.BatchEmbedTextResponse + + + + + + + + + +The response to a EmbedTextRequest. + + + + + + + + + + + + + + + +
+`embeddings` + +`MutableSequence[google.ai.generativelanguage.Embedding]` + +Output only. The embeddings generated from +the input text. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md new file mode 100644 index 000000000..0d1445da9 --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md @@ -0,0 +1,62 @@ +description: Request to batch update Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.BatchUpdateChunksRequest + + + + + + + + + +Request to batch update ``Chunk``\ s. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Optional. The name of the ``Document`` containing the +``Chunk``\ s to update. The parent field in every +``UpdateChunkRequest`` must match this value. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`requests` + +`MutableSequence[google.ai.generativelanguage.UpdateChunkRequest]` + +Required. The request messages specifying the ``Chunk``\ s +to update. A maximum of 100 ``Chunk``\ s can be updated in a +batch. +
+ + + diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md new file mode 100644 index 000000000..258f21f7c --- /dev/null +++ b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md @@ -0,0 +1,48 @@ +description: Response from BatchUpdateChunks containing a list of updated Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.BatchUpdateChunksResponse + + + + + + + + + +Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s. + + + + + + + + + + + + + + + +
+`chunks` + +`MutableSequence[google.ai.generativelanguage.Chunk]` + +``Chunk``\ s updated. +
+ + + diff --git a/docs/api/google/generativeai/protos/Blob.md b/docs/api/google/generativeai/protos/Blob.md new file mode 100644 index 000000000..7985af92f --- /dev/null +++ b/docs/api/google/generativeai/protos/Blob.md @@ -0,0 +1,64 @@ +description: Raw media bytes. + +
+ + +
+ +# google.generativeai.protos.Blob + + + + + + + + + +Raw media bytes. + + + +Text should not be sent as raw bytes, use the 'text' field. + + + + + + + + + + + + + + + +
+`mime_type` + +`str` + +The IANA standard MIME type of the source data. Examples: + +- image/png +- image/jpeg If an unsupported MIME type is provided, an + error will be returned. For a complete list of supported + types, see `Supported file + formats `__. +
+`data` + +`bytes` + +Raw bytes for media formats. +
+ + + diff --git a/docs/api/google/generativeai/protos/CachedContent.md b/docs/api/google/generativeai/protos/CachedContent.md new file mode 100644 index 000000000..dd6d513b9 --- /dev/null +++ b/docs/api/google/generativeai/protos/CachedContent.md @@ -0,0 +1,181 @@ +description: Content that has been preprocessed and can be used in subsequent request to GenerativeService. + +
+ + + +
+ +# google.generativeai.protos.CachedContent + + + + + + + + + +Content that has been preprocessed and can be used in subsequent request to GenerativeService. + + + +Cached content can be only used with model it was created for. + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`expire_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Timestamp in UTC of when this resource is considered +expired. This is *always* provided on output, regardless of +what was sent on input. + +This field is a member of `oneof`_ ``expiration``. +
+`ttl` + +`google.protobuf.duration_pb2.Duration` + +Input only. New TTL for this resource, input +only. + +This field is a member of `oneof`_ ``expiration``. +
+`name` + +`str` + +Optional. Identifier. The resource name referring to the +cached content. Format: ``cachedContents/{id}`` + +
+`display_name` + +`str` + +Optional. Immutable. The user-generated +meaningful display name of the cached content. +Maximum 128 Unicode characters. + +
+`model` + +`str` + +Required. Immutable. The name of the ``Model`` to use for +cached content Format: ``models/{model}`` + +
+`system_instruction` + +`google.ai.generativelanguage.Content` + +Optional. Input only. Immutable. Developer +set system instruction. Currently text only. + +
+`contents` + +`MutableSequence[google.ai.generativelanguage.Content]` + +Optional. Input only. Immutable. The content +to cache. +
+`tools` + +`MutableSequence[google.ai.generativelanguage.Tool]` + +Optional. Input only. Immutable. A list of ``Tools`` the +model may use to generate the next response +
+`tool_config` + +`google.ai.generativelanguage.ToolConfig` + +Optional. Input only. Immutable. Tool config. +This config is shared for all tools. + +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. Creation time of the cache +entry. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. When the cache entry was last +updated in UTC time. +
+`usage_metadata` + +`google.ai.generativelanguage.CachedContent.UsageMetadata` + +Output only. Metadata on the usage of the +cached content. +
+ + + +## Child Classes +[`class UsageMetadata`](../../../google/generativeai/protos/CachedContent/UsageMetadata.md) + diff --git a/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md new file mode 100644 index 000000000..b2bab36fe --- /dev/null +++ b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md @@ -0,0 +1,49 @@ +description: Metadata on the usage of the cached content. + +
+ + +
+ +# google.generativeai.protos.CachedContent.UsageMetadata + + + + + + + + + +Metadata on the usage of the cached content. + + + + + + + + + + + + + + + +
+`total_token_count` + +`int` + +Total number of tokens that the cached +content consumes. +
+ + + diff --git a/docs/api/google/generativeai/protos/Candidate.md b/docs/api/google/generativeai/protos/Candidate.md new file mode 100644 index 000000000..ee20c608c --- /dev/null +++ b/docs/api/google/generativeai/protos/Candidate.md @@ -0,0 +1,123 @@ +description: A response candidate generated from the model. + +
+ + + +
+ +# google.generativeai.protos.Candidate + + + + + + + + + +A response candidate generated from the model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`index` + +`int` + +Output only. Index of the candidate in the +list of candidates. + +
+`content` + +`google.ai.generativelanguage.Content` + +Output only. Generated content returned from +the model. +
+`finish_reason` + +`google.ai.generativelanguage.Candidate.FinishReason` + +Optional. Output only. The reason why the +model stopped generating tokens. +If empty, the model has not stopped generating +the tokens. +
+`safety_ratings` + +`MutableSequence[google.ai.generativelanguage.SafetyRating]` + +List of ratings for the safety of a response +candidate. +There is at most one rating per category. +
+`citation_metadata` + +`google.ai.generativelanguage.CitationMetadata` + +Output only. Citation information for model-generated +candidate. + +This field may be populated with recitation information for +any text included in the ``content``. These are passages +that are "recited" from copyrighted material in the +foundational LLM's training data. +
+`token_count` + +`int` + +Output only. Token count for this candidate. +
+`grounding_attributions` + +`MutableSequence[google.ai.generativelanguage.GroundingAttribution]` + +Output only. Attribution information for sources that +contributed to a grounded answer. + +This field is populated for ``GenerateAnswer`` calls. +
+ + + +## Child Classes +[`class FinishReason`](../../../google/generativeai/protos/Candidate/FinishReason.md) + diff --git a/docs/api/google/generativeai/protos/Candidate/FinishReason.md b/docs/api/google/generativeai/protos/Candidate/FinishReason.md new file mode 100644 index 000000000..d6efd591b --- /dev/null +++ b/docs/api/google/generativeai/protos/Candidate/FinishReason.md @@ -0,0 +1,733 @@ +description: Defines the reason why the model stopped generating tokens. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Candidate.FinishReason + + + + + + + + + +Defines the reason why the model stopped generating tokens. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`FINISH_REASON_UNSPECIFIED` + +`0` + +Default value. This value is unused. +
+`STOP` + +`1` + +Natural stop point of the model or provided +stop sequence. +
+`MAX_TOKENS` + +`2` + +The maximum number of tokens as specified in +the request was reached. +
+`SAFETY` + +`3` + +The candidate content was flagged for safety +reasons. +
+`RECITATION` + +`4` + +The candidate content was flagged for +recitation reasons. +
+`OTHER` + +`5` + +Unknown reason. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +
__lt__
+ + + +Return self<value. + + +
__mod__
+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +
__rmod__
+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+FINISH_REASON_UNSPECIFIED + +`` +
+MAX_TOKENS + +`` +
+OTHER + +`` +
+RECITATION + +`` +
+SAFETY + +`` +
+STOP + +`` +
+ diff --git a/docs/api/google/generativeai/protos/Chunk.md b/docs/api/google/generativeai/protos/Chunk.md new file mode 100644 index 000000000..d807ab292 --- /dev/null +++ b/docs/api/google/generativeai/protos/Chunk.md @@ -0,0 +1,108 @@ +description: A Chunk is a subpart of a Document that is treated as an independent unit for the purposes of vector representation and storage. + +
+ + + +
+ +# google.generativeai.protos.Chunk + + + + + + + + + +A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage. + + + A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Immutable. Identifier. The ``Chunk`` resource name. The ID +(name excluding the `corpora/*/documents/*/chunks/` prefix) +can contain up to 40 characters that are lowercase +alphanumeric or dashes (-). The ID cannot start or end with +a dash. If the name is empty on create, a random +12-character unique ID will be generated. Example: +``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c`` +
+`data` + +`google.ai.generativelanguage.ChunkData` + +Required. The content for the ``Chunk``, such as the text +string. The maximum number of tokens per chunk is 2043. +
+`custom_metadata` + +`MutableSequence[google.ai.generativelanguage.CustomMetadata]` + +Optional. User provided custom metadata stored as key-value +pairs. The maximum number of ``CustomMetadata`` per chunk is +20. +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Chunk`` was +created. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Chunk`` was last +updated. +
+`state` + +`google.ai.generativelanguage.Chunk.State` + +Output only. Current state of the ``Chunk``. +
+ + + +## Child Classes +[`class State`](../../../google/generativeai/protos/Chunk/State.md) + diff --git a/docs/api/google/generativeai/protos/Chunk/State.md b/docs/api/google/generativeai/protos/Chunk/State.md new file mode 100644 index 000000000..137cb9efe --- /dev/null +++ b/docs/api/google/generativeai/protos/Chunk/State.md @@ -0,0 +1,696 @@ +description: States for the lifecycle of a Chunk. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Chunk.State + + + + + + + + + +States for the lifecycle of a ``Chunk``. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`STATE_UNSPECIFIED` + +`0` + +The default value. This value is used if the +state is omitted. +
+`STATE_PENDING_PROCESSING` + +`1` + +``Chunk`` is being processed (embedding and vector storage). +
+`STATE_ACTIVE` + +`2` + +``Chunk`` is processed and available for querying. +
+`STATE_FAILED` + +`10` + +``Chunk`` failed processing. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +
__lt__
+ + + +Return self<value. + + +
__mod__
+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +
__rmod__
+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+STATE_ACTIVE + +`` +
+STATE_FAILED + +`` +
+STATE_PENDING_PROCESSING + +`` +
+STATE_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/ChunkData.md b/docs/api/google/generativeai/protos/ChunkData.md new file mode 100644 index 000000000..9527b78b5 --- /dev/null +++ b/docs/api/google/generativeai/protos/ChunkData.md @@ -0,0 +1,51 @@ +description: Extracted data that represents the Chunk content. + +
+ + +
+ +# google.generativeai.protos.ChunkData + + + + + + + + + +Extracted data that represents the ``Chunk`` content. + + + + + + + + + + + + + + + +
+`string_value` + +`str` + +The ``Chunk`` content as a string. The maximum number of +tokens per chunk is 2043. + +This field is a member of `oneof`_ ``data``. +
+ + + diff --git a/docs/api/google/generativeai/protos/CitationMetadata.md b/docs/api/google/generativeai/protos/CitationMetadata.md new file mode 100644 index 000000000..58e6219df --- /dev/null +++ b/docs/api/google/generativeai/protos/CitationMetadata.md @@ -0,0 +1,48 @@ +description: A collection of source attributions for a piece of content. + +
+ + +
+ +# google.generativeai.protos.CitationMetadata + + + + + + + + + +A collection of source attributions for a piece of content. + + + + + + + + + + + + + + + +
+`citation_sources` + +`MutableSequence[google.ai.generativelanguage.CitationSource]` + +Citations to sources for a specific response. +
+ + + diff --git a/docs/api/google/generativeai/protos/CitationSource.md b/docs/api/google/generativeai/protos/CitationSource.md new file mode 100644 index 000000000..79689da5d --- /dev/null +++ b/docs/api/google/generativeai/protos/CitationSource.md @@ -0,0 +1,88 @@ +description: A citation to a source for a portion of a specific response. + +
+ + +
+ +# google.generativeai.protos.CitationSource + + + + + + + + + +A citation to a source for a portion of a specific response. + + + + + + + + + + + + + + + + + + + + + + + + +
+`start_index` + +`int` + +Optional. Start of segment of the response +that is attributed to this source. + +Index indicates the start of the segment, +measured in bytes. + +
+`end_index` + +`int` + +Optional. End of the attributed segment, +exclusive. + +
+`uri` + +`str` + +Optional. URI that is attributed as a source +for a portion of the text. + +
+`license_` + +`str` + +Optional. License for the GitHub project that +is attributed as a source for segment. + +License info is required for code citations. + +
+ + + diff --git a/docs/api/google/generativeai/protos/CodeExecution.md b/docs/api/google/generativeai/protos/CodeExecution.md new file mode 100644 index 000000000..f74a88327 --- /dev/null +++ b/docs/api/google/generativeai/protos/CodeExecution.md @@ -0,0 +1,29 @@ +description: Tool that executes code generated by the model, and automatically returns the result to the model. + +
+ + +
+ +# google.generativeai.protos.CodeExecution + + + + + + + + + +Tool that executes code generated by the model, and automatically returns the result to the model. + + + +See also ``ExecutableCode`` and ``CodeExecutionResult`` which are +only generated when using this tool. + diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult.md b/docs/api/google/generativeai/protos/CodeExecutionResult.md new file mode 100644 index 000000000..39ce4573d --- /dev/null +++ b/docs/api/google/generativeai/protos/CodeExecutionResult.md @@ -0,0 +1,65 @@ +description: Result of executing the ExecutableCode. + +
+ + + +
+ +# google.generativeai.protos.CodeExecutionResult + + + + + + + + + +Result of executing the ``ExecutableCode``. + + + +Only generated when using the ``CodeExecution``, and always follows +a ``part`` containing the ``ExecutableCode``. + + + + + + + + + + + + + + + +
+`outcome` + +`google.ai.generativelanguage.CodeExecutionResult.Outcome` + +Required. Outcome of the code execution. +
+`output` + +`str` + +Optional. Contains stdout when code execution +is successful, stderr or other description +otherwise. +
+ + + +## Child Classes +[`class Outcome`](../../../google/generativeai/protos/CodeExecutionResult/Outcome.md) + diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md new file mode 100644 index 000000000..54b0195fd --- /dev/null +++ b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md @@ -0,0 +1,699 @@ +description: Enumeration of possible outcomes of the code execution. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.CodeExecutionResult.Outcome + + + + + + + + + +Enumeration of possible outcomes of the code execution. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`OUTCOME_UNSPECIFIED` + +`0` + +Unspecified status. This value should not be +used. +
+`OUTCOME_OK` + +`1` + +Code execution completed successfully. +
+`OUTCOME_FAILED` + +`2` + +Code execution finished but with a failure. ``stderr`` +should contain the reason. +
+`OUTCOME_DEADLINE_EXCEEDED` + +`3` + +Code execution ran for too long, and was +cancelled. There may or may not be a partial +output present. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +
__lt__
+ + + +Return self<value. + + +
__mod__
+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +
__rmod__
+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+OUTCOME_DEADLINE_EXCEEDED + +`` +
+OUTCOME_FAILED + +`` +
+OUTCOME_OK + +`` +
+OUTCOME_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/Condition.md b/docs/api/google/generativeai/protos/Condition.md new file mode 100644 index 000000000..194bdbc59 --- /dev/null +++ b/docs/api/google/generativeai/protos/Condition.md @@ -0,0 +1,80 @@ +description: Filter condition applicable to a single key. + +
+ + + +
+ +# google.generativeai.protos.Condition + + + + + + + + + +Filter condition applicable to a single key. + + + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + +
+`string_value` + +`str` + +The string value to filter the metadata on. + +This field is a member of `oneof`_ ``value``. +
+`numeric_value` + +`float` + +The numeric value to filter the metadata on. + +This field is a member of `oneof`_ ``value``. +
+`operation` + +`google.ai.generativelanguage.Condition.Operator` + +Required. Operator applied to the given +key-value pair to trigger the condition. +
+ + + +## Child Classes +[`class Operator`](../../../google/generativeai/protos/Condition/Operator.md) + diff --git a/docs/api/google/generativeai/protos/Condition/Operator.md b/docs/api/google/generativeai/protos/Condition/Operator.md new file mode 100644 index 000000000..1a7cdec31 --- /dev/null +++ b/docs/api/google/generativeai/protos/Condition/Operator.md @@ -0,0 +1,782 @@ +description: Defines the valid operators that can be applied to a key-value pair. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Condition.Operator + + + + + + + + + +Defines the valid operators that can be applied to a key-value pair. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`OPERATOR_UNSPECIFIED` + +`0` + +The default value. This value is unused. +
+`LESS` + +`1` + +Supported by numeric. +
+`LESS_EQUAL` + +`2` + +Supported by numeric. +
+`EQUAL` + +`3` + +Supported by numeric & string. +
+`GREATER_EQUAL` + +`4` + +Supported by numeric. +
+`GREATER` + +`5` + +Supported by numeric. +
+`NOT_EQUAL` + +`6` + +Supported by numeric & string. +
+`INCLUDES` + +`7` + +Supported by string only when ``CustomMetadata`` value type +for the given key has a ``string_list_value``. +
+`EXCLUDES` + +`8` + +Supported by string only when ``CustomMetadata`` value type +for the given key has a ``string_list_value``. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +
__lt__
+ + + +Return self<value. + + +
__mod__
+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +
__rmod__
+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+EQUAL + +`` +
+EXCLUDES + +`` +
+GREATER + +`` +
+GREATER_EQUAL + +`` +
+INCLUDES + +`` +
+LESS + +`` +
+LESS_EQUAL + +`` +
+NOT_EQUAL + +`` +
+OPERATOR_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/Content.md b/docs/api/google/generativeai/protos/Content.md new file mode 100644 index 000000000..af204b5cf --- /dev/null +++ b/docs/api/google/generativeai/protos/Content.md @@ -0,0 +1,64 @@ +description: The base structured datatype containing multi-part content of a message. + +
+ + +
+ +# google.generativeai.protos.Content + + + + + + + + + +The base structured datatype containing multi-part content of a message. + + + +A ``Content`` includes a ``role`` field designating the producer of +the ``Content`` and a ``parts`` field containing multi-part data +that contains the content of the message turn. + + + + + + + + + + + + + + + +
+`parts` + +`MutableSequence[google.ai.generativelanguage.Part]` + +Ordered ``Parts`` that constitute a single message. Parts +may have different MIME types. +
+`role` + +`str` + +Optional. The producer of the content. Must +be either 'user' or 'model'. +Useful to set for multi-turn conversations, +otherwise can be left blank or unset. +
+ + + diff --git a/docs/api/google/generativeai/protos/ContentEmbedding.md b/docs/api/google/generativeai/protos/ContentEmbedding.md new file mode 100644 index 000000000..ce60ea10a --- /dev/null +++ b/docs/api/google/generativeai/protos/ContentEmbedding.md @@ -0,0 +1,48 @@ +description: A list of floats representing an embedding. + +
+ + +
+ +# google.generativeai.protos.ContentEmbedding + + + + + + + + + +A list of floats representing an embedding. + + + + + + + + + + + + + + + +
+`values` + +`MutableSequence[float]` + +The embedding values. +
+ + + diff --git a/docs/api/google/generativeai/protos/ContentFilter.md b/docs/api/google/generativeai/protos/ContentFilter.md new file mode 100644 index 000000000..659d438c5 --- /dev/null +++ b/docs/api/google/generativeai/protos/ContentFilter.md @@ -0,0 +1,67 @@ +description: Content filtering metadata associated with processing a single request. + +
+ + + +
+ +# google.generativeai.protos.ContentFilter + + + + + + + + + +Content filtering metadata associated with processing a single request. + + +ContentFilter contains a reason and an optional supporting +string. The reason may be unspecified. + + + + + + + + + + + + + + + + + +
+`reason` + +`google.ai.generativelanguage.ContentFilter.BlockedReason` + +The reason content was blocked during request +processing. +
+`message` + +`str` + +A string that describes the filtering +behavior in more detail. + +
+ + + +## Child Classes +[`class BlockedReason`](../../../google/generativeai/types/BlockedReason.md) + diff --git a/docs/api/google/generativeai/protos/Corpus.md b/docs/api/google/generativeai/protos/Corpus.md new file mode 100644 index 000000000..66b6ded78 --- /dev/null +++ b/docs/api/google/generativeai/protos/Corpus.md @@ -0,0 +1,87 @@ +description: A Corpus is a collection of Document\ s. + +
+ + +
+ +# google.generativeai.protos.Corpus + + + + + + + + + +A ``Corpus`` is a collection of ``Document``\ s. + + + A project can +create up to 5 corpora. + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Immutable. Identifier. The ``Corpus`` resource name. The ID +(name excluding the "corpora/" prefix) can contain up to 40 +characters that are lowercase alphanumeric or dashes (-). +The ID cannot start or end with a dash. If the name is empty +on create, a unique name will be derived from +``display_name`` along with a 12 character random suffix. +Example: ``corpora/my-awesome-corpora-123a456b789c`` +
+`display_name` + +`str` + +Optional. The human-readable display name for the +``Corpus``. The display name must be no more than 512 +characters in length, including spaces. Example: "Docs on +Semantic Retriever". +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Corpus`` was +created. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Corpus`` was last +updated. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountMessageTokensRequest.md b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md new file mode 100644 index 000000000..10be99376 --- /dev/null +++ b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md @@ -0,0 +1,66 @@ +description: Counts the number of tokens in the prompt sent to a model. + +
+ + +
+ +# google.generativeai.protos.CountMessageTokensRequest + + + + + + + + + +Counts the number of tokens in the ``prompt`` sent to a model. + + + +Models may tokenize text differently, so each model may return a +different ``token_count``. + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model's resource name. This serves as an ID +for the Model to use. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+`prompt` + +`google.ai.generativelanguage.MessagePrompt` + +Required. The prompt, whose token count is to +be returned. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountMessageTokensResponse.md b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md new file mode 100644 index 000000000..91a414735 --- /dev/null +++ b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md @@ -0,0 +1,52 @@ +description: A response from CountMessageTokens. + +
+ + +
+ +# google.generativeai.protos.CountMessageTokensResponse + + + + + + + + + +A response from ``CountMessageTokens``. + + + +It returns the model's ``token_count`` for the ``prompt``. + + + + + + + + + + + + +
+`token_count` + +`int` + +The number of tokens that the ``model`` tokenizes the +``prompt`` into. + +Always non-negative. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountTextTokensRequest.md b/docs/api/google/generativeai/protos/CountTextTokensRequest.md new file mode 100644 index 000000000..dc28b4959 --- /dev/null +++ b/docs/api/google/generativeai/protos/CountTextTokensRequest.md @@ -0,0 +1,66 @@ +description: Counts the number of tokens in the prompt sent to a model. + +
+ + +
+ +# google.generativeai.protos.CountTextTokensRequest + + + + + + + + + +Counts the number of tokens in the ``prompt`` sent to a model. + + + +Models may tokenize text differently, so each model may return a +different ``token_count``. + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model's resource name. This serves as an ID +for the Model to use. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+`prompt` + +`google.ai.generativelanguage.TextPrompt` + +Required. The free-form input text given to +the model as a prompt. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountTextTokensResponse.md b/docs/api/google/generativeai/protos/CountTextTokensResponse.md new file mode 100644 index 000000000..75b139c89 --- /dev/null +++ b/docs/api/google/generativeai/protos/CountTextTokensResponse.md @@ -0,0 +1,52 @@ +description: A response from CountTextTokens. + +
+ + +
+ +# google.generativeai.protos.CountTextTokensResponse + + + + + + + + + +A response from ``CountTextTokens``. + + + +It returns the model's ``token_count`` for the ``prompt``. + + + + + + + + + + + + +
+`token_count` + +`int` + +The number of tokens that the ``model`` tokenizes the +``prompt`` into. + +Always non-negative. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountTokensRequest.md b/docs/api/google/generativeai/protos/CountTokensRequest.md new file mode 100644 index 000000000..3ec7fd287 --- /dev/null +++ b/docs/api/google/generativeai/protos/CountTokensRequest.md @@ -0,0 +1,77 @@ +description: Counts the number of tokens in the prompt sent to a model. + +
+ + +
+ +# google.generativeai.protos.CountTokensRequest + + + + + + + + + +Counts the number of tokens in the ``prompt`` sent to a model. + + + +Models may tokenize text differently, so each model may return a +different ``token_count``. + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model's resource name. This serves as an ID +for the Model to use. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+`contents` + +`MutableSequence[google.ai.generativelanguage.Content]` + +Optional. The input given to the model as a prompt. This +field is ignored when ``generate_content_request`` is set. +
+`generate_content_request` + +`google.ai.generativelanguage.GenerateContentRequest` + +Optional. The overall input given to the +model. CountTokens will count prompt, function +calling, etc. +
+ + + diff --git a/docs/api/google/generativeai/protos/CountTokensResponse.md b/docs/api/google/generativeai/protos/CountTokensResponse.md new file mode 100644 index 000000000..ca4c9899a --- /dev/null +++ b/docs/api/google/generativeai/protos/CountTokensResponse.md @@ -0,0 +1,64 @@ +description: A response from CountTokens. + +
+ + +
+ +# google.generativeai.protos.CountTokensResponse + + + + + + + + + +A response from ``CountTokens``. + + + +It returns the model's ``token_count`` for the ``prompt``. + + + + + + + + + + + + + + + +
+`total_tokens` + +`int` + +The number of tokens that the ``model`` tokenizes the +``prompt`` into. + +Always non-negative. When cached_content is set, this is +still the total effective prompt size. I.e. this includes +the number of tokens in the cached content. +
+`cached_content_token_count` + +`int` + +Number of tokens in the cached part of the +prompt, i.e. in the cached content. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateCachedContentRequest.md b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md new file mode 100644 index 000000000..d34f0b8da --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md @@ -0,0 +1,48 @@ +description: Request to create CachedContent. + +
+ + +
+ +# google.generativeai.protos.CreateCachedContentRequest + + + + + + + + + +Request to create CachedContent. + + + + + + + + + + + + + + + +
+`cached_content` + +`google.ai.generativelanguage.CachedContent` + +Required. The cached content to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateChunkRequest.md b/docs/api/google/generativeai/protos/CreateChunkRequest.md new file mode 100644 index 000000000..706273d4a --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateChunkRequest.md @@ -0,0 +1,59 @@ +description: Request to create a Chunk. + +
+ + +
+ +# google.generativeai.protos.CreateChunkRequest + + + + + + + + + +Request to create a ``Chunk``. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The name of the ``Document`` where this ``Chunk`` +will be created. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`chunk` + +`google.ai.generativelanguage.Chunk` + +Required. The ``Chunk`` to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateCorpusRequest.md b/docs/api/google/generativeai/protos/CreateCorpusRequest.md new file mode 100644 index 000000000..68e3a89ce --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateCorpusRequest.md @@ -0,0 +1,48 @@ +description: Request to create a Corpus. + +
+ + +
+ +# google.generativeai.protos.CreateCorpusRequest + + + + + + + + + +Request to create a ``Corpus``. + + + + + + + + + + + + + + + +
+`corpus` + +`google.ai.generativelanguage.Corpus` + +Required. The ``Corpus`` to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateDocumentRequest.md b/docs/api/google/generativeai/protos/CreateDocumentRequest.md new file mode 100644 index 000000000..72a14b3aa --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateDocumentRequest.md @@ -0,0 +1,58 @@ +description: Request to create a Document. + +
+ + +
+ +# google.generativeai.protos.CreateDocumentRequest + + + + + + + + + +Request to create a ``Document``. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The name of the ``Corpus`` where this ``Document`` +will be created. Example: ``corpora/my-corpus-123`` +
+`document` + +`google.ai.generativelanguage.Document` + +Required. The ``Document`` to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateFileRequest.md b/docs/api/google/generativeai/protos/CreateFileRequest.md new file mode 100644 index 000000000..53f83c72c --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateFileRequest.md @@ -0,0 +1,48 @@ +description: Request for CreateFile. + +
+ + +
+ +# google.generativeai.protos.CreateFileRequest + + + + + + + + + +Request for ``CreateFile``. + + + + + + + + + + + + + + + +
+`file` + +`google.ai.generativelanguage.File` + +Optional. Metadata for the file to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateFileResponse.md b/docs/api/google/generativeai/protos/CreateFileResponse.md new file mode 100644 index 000000000..fd8ad9f44 --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateFileResponse.md @@ -0,0 +1,48 @@ +description: Response for CreateFile. + +
+ + +
+ +# google.generativeai.protos.CreateFileResponse + + + + + + + + + +Response for ``CreateFile``. + + + + + + + + + + + + + + + +
+`file` + +`google.ai.generativelanguage.File` + +Metadata for the created file. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreatePermissionRequest.md b/docs/api/google/generativeai/protos/CreatePermissionRequest.md new file mode 100644 index 000000000..8e8c9b945 --- /dev/null +++ b/docs/api/google/generativeai/protos/CreatePermissionRequest.md @@ -0,0 +1,58 @@ +description: Request to create a Permission. + +
+ + +
+ +# google.generativeai.protos.CreatePermissionRequest + + + + + + + + + +Request to create a ``Permission``. + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The parent resource of the ``Permission``. +Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}`` +
+`permission` + +`google.ai.generativelanguage.Permission` + +Required. The permission to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md new file mode 100644 index 000000000..79a3bd8e4 --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md @@ -0,0 +1,86 @@ +description: Metadata about the state and progress of creating a tuned model returned from the long-running operation + +
+ + +
+ +# google.generativeai.protos.CreateTunedModelMetadata + + + + + + + + + +Metadata about the state and progress of creating a tuned model returned from the long-running operation + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`tuned_model` + +`str` + +Name of the tuned model associated with the +tuning operation. +
+`total_steps` + +`int` + +The total number of tuning steps. +
+`completed_steps` + +`int` + +The number of steps completed. +
+`completed_percent` + +`float` + +The completed percentage for the tuning +operation. +
+`snapshots` + +`MutableSequence[google.ai.generativelanguage.TuningSnapshot]` + +Metrics collected during tuning. +
+ + + diff --git a/docs/api/google/generativeai/protos/CreateTunedModelRequest.md b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md new file mode 100644 index 000000000..572bc2c83 --- /dev/null +++ b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md @@ -0,0 +1,62 @@ +description: Request to create a TunedModel. + +
+ + +
+ +# google.generativeai.protos.CreateTunedModelRequest + + + + + + + + + +Request to create a TunedModel. + + + + + + + + + + + + + + + + + + +
+`tuned_model_id` + +`str` + +Optional. The unique id for the tuned model if specified. +This value should be up to 40 characters, the first +character must be a letter, the last could be a letter or a +number. The id must match the regular expression: +``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``. + +
+`tuned_model` + +`google.ai.generativelanguage.TunedModel` + +Required. The tuned model to create. +
+ + + diff --git a/docs/api/google/generativeai/protos/CustomMetadata.md b/docs/api/google/generativeai/protos/CustomMetadata.md new file mode 100644 index 000000000..1a401a93c --- /dev/null +++ b/docs/api/google/generativeai/protos/CustomMetadata.md @@ -0,0 +1,87 @@ +description: User provided metadata stored as key-value pairs. + +
+ + +
+ +# google.generativeai.protos.CustomMetadata + + + + + + + + + +User provided metadata stored as key-value pairs. + + + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + +
+`string_value` + +`str` + +The string value of the metadata to store. + +This field is a member of `oneof`_ ``value``. +
+`string_list_value` + +`google.ai.generativelanguage.StringList` + +The StringList value of the metadata to +store. + +This field is a member of `oneof`_ ``value``. +
+`numeric_value` + +`float` + +The numeric value of the metadata to store. + +This field is a member of `oneof`_ ``value``. +
+`key` + +`str` + +Required. The key of the metadata to store. +
+ + + diff --git a/docs/api/google/generativeai/protos/Dataset.md b/docs/api/google/generativeai/protos/Dataset.md new file mode 100644 index 000000000..d00c056c6 --- /dev/null +++ b/docs/api/google/generativeai/protos/Dataset.md @@ -0,0 +1,50 @@ +description: Dataset for training or validation. + +
+ + +
+ +# google.generativeai.protos.Dataset + + + + + + + + + +Dataset for training or validation. + + + + + + + + + + + + + + + +
+`examples` + +`google.ai.generativelanguage.TuningExamples` + +Optional. Inline examples. + +This field is a member of `oneof`_ ``dataset``. +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md new file mode 100644 index 000000000..48df77591 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md @@ -0,0 +1,49 @@ +description: Request to delete CachedContent. + +
+ + +
+ +# google.generativeai.protos.DeleteCachedContentRequest + + + + + + + + + +Request to delete CachedContent. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name referring to the content cache +entry Format: ``cachedContents/{id}`` +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteChunkRequest.md b/docs/api/google/generativeai/protos/DeleteChunkRequest.md new file mode 100644 index 000000000..99f24bd06 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteChunkRequest.md @@ -0,0 +1,50 @@ +description: Request to delete a Chunk. + +
+ + +
+ +# google.generativeai.protos.DeleteChunkRequest + + + + + + + + + +Request to delete a ``Chunk``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the ``Chunk`` to delete. +Example: +``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteCorpusRequest.md b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md new file mode 100644 index 000000000..0c4e42727 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md @@ -0,0 +1,62 @@ +description: Request to delete a Corpus. + +
+ + +
+ +# google.generativeai.protos.DeleteCorpusRequest + + + + + + + + + +Request to delete a ``Corpus``. + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the ``Corpus``. Example: +``corpora/my-corpus-123`` +
+`force` + +`bool` + +Optional. If set to true, any ``Document``\ s and objects +related to this ``Corpus`` will also be deleted. + +If false (the default), a ``FAILED_PRECONDITION`` error will +be returned if ``Corpus`` contains any ``Document``\ s. +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteDocumentRequest.md b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md new file mode 100644 index 000000000..e4c78f097 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md @@ -0,0 +1,62 @@ +description: Request to delete a Document. + +
+ + +
+ +# google.generativeai.protos.DeleteDocumentRequest + + + + + + + + + +Request to delete a ``Document``. + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the ``Document`` to delete. +Example: ``corpora/my-corpus-123/documents/the-doc-abc`` +
+`force` + +`bool` + +Optional. If set to true, any ``Chunk``\ s and objects +related to this ``Document`` will also be deleted. + +If false (the default), a ``FAILED_PRECONDITION`` error will +be returned if ``Document`` contains any ``Chunk``\ s. +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteFileRequest.md b/docs/api/google/generativeai/protos/DeleteFileRequest.md new file mode 100644 index 000000000..68cefab0a --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteFileRequest.md @@ -0,0 +1,49 @@ +description: Request for DeleteFile. + +
+ + +
+ +# google.generativeai.protos.DeleteFileRequest + + + + + + + + + +Request for ``DeleteFile``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``File`` to delete. Example: +``files/abc-123`` +
+ + + diff --git a/docs/api/google/generativeai/protos/DeletePermissionRequest.md b/docs/api/google/generativeai/protos/DeletePermissionRequest.md new file mode 100644 index 000000000..616a8b075 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeletePermissionRequest.md @@ -0,0 +1,50 @@ +description: Request to delete the Permission. + +
+ + +
+ +# google.generativeai.protos.DeletePermissionRequest + + + + + + + + + +Request to delete the ``Permission``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the permission. Formats: +``tunedModels/{tuned_model}/permissions/{permission}`` +``corpora/{corpus}/permissions/{permission}`` +
+ + + diff --git a/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md new file mode 100644 index 000000000..aa4cd8b93 --- /dev/null +++ b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md @@ -0,0 +1,49 @@ +description: Request to delete a TunedModel. + +
+ + +
+ +# google.generativeai.protos.DeleteTunedModelRequest + + + + + + + + + +Request to delete a TunedModel. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the model. Format: +``tunedModels/my-model-id`` +
+ + + diff --git a/docs/api/google/generativeai/protos/Document.md b/docs/api/google/generativeai/protos/Document.md new file mode 100644 index 000000000..fe32d1e41 --- /dev/null +++ b/docs/api/google/generativeai/protos/Document.md @@ -0,0 +1,99 @@ +description: A Document is a collection of Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.Document + + + + + + + + + +A ``Document`` is a collection of ``Chunk``\ s. + + + A ``Corpus`` can +have a maximum of 10,000 ``Document``\ s. + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Immutable. Identifier. The ``Document`` resource name. The +ID (name excluding the `corpora/*/documents/` prefix) can +contain up to 40 characters that are lowercase alphanumeric +or dashes (-). The ID cannot start or end with a dash. If +the name is empty on create, a unique name will be derived +from ``display_name`` along with a 12 character random +suffix. Example: +``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c`` +
+`display_name` + +`str` + +Optional. The human-readable display name for the +``Document``. The display name must be no more than 512 +characters in length, including spaces. Example: "Semantic +Retriever Documentation". +
+`custom_metadata` + +`MutableSequence[google.ai.generativelanguage.CustomMetadata]` + +Optional. User provided custom metadata stored as key-value +pairs used for querying. A ``Document`` can have a maximum +of 20 ``CustomMetadata``. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Document`` was last +updated. +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The Timestamp of when the ``Document`` was +created. +
+ + + diff --git a/docs/api/google/generativeai/protos/EmbedContentRequest.md b/docs/api/google/generativeai/protos/EmbedContentRequest.md new file mode 100644 index 000000000..be4d52cc0 --- /dev/null +++ b/docs/api/google/generativeai/protos/EmbedContentRequest.md @@ -0,0 +1,103 @@ +description: Request containing the Content for the model to embed. + +
+ + +
+ +# google.generativeai.protos.EmbedContentRequest + + + + + + + + + +Request containing the ``Content`` for the model to embed. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model's resource name. This serves as an ID +for the Model to use. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+`content` + +`google.ai.generativelanguage.Content` + +Required. The content to embed. Only the ``parts.text`` +fields will be counted. +
+`task_type` + +`google.ai.generativelanguage.TaskType` + +Optional. Optional task type for which the embeddings will +be used. Can only be set for ``models/embedding-001``. + +
+`title` + +`str` + +Optional. An optional title for the text. Only applicable +when TaskType is ``RETRIEVAL_DOCUMENT``. + +Note: Specifying a ``title`` for ``RETRIEVAL_DOCUMENT`` +provides better quality embeddings for retrieval. + +
+`output_dimensionality` + +`int` + +Optional. Optional reduced dimension for the output +embedding. If set, excessive values in the output embedding +are truncated from the end. Supported by newer models since +2024, and the earlier model (``models/embedding-001``) +cannot specify this value. + +
+ + + diff --git a/docs/api/google/generativeai/protos/EmbedContentResponse.md b/docs/api/google/generativeai/protos/EmbedContentResponse.md new file mode 100644 index 000000000..80fce6b73 --- /dev/null +++ b/docs/api/google/generativeai/protos/EmbedContentResponse.md @@ -0,0 +1,49 @@ +description: The response to an EmbedContentRequest. + +
+ + +
+ +# google.generativeai.protos.EmbedContentResponse + + + + + + + + + +The response to an ``EmbedContentRequest``. + + + + + + + + + + + + + + + +
+`embedding` + +`google.ai.generativelanguage.ContentEmbedding` + +Output only. The embedding generated from the +input content. +
+ + + diff --git a/docs/api/google/generativeai/protos/EmbedTextRequest.md b/docs/api/google/generativeai/protos/EmbedTextRequest.md new file mode 100644 index 000000000..e4078db68 --- /dev/null +++ b/docs/api/google/generativeai/protos/EmbedTextRequest.md @@ -0,0 +1,59 @@ +description: Request to get a text embedding from the model. + +
+ + +
+ +# google.generativeai.protos.EmbedTextRequest + + + + + + + + + +Request to get a text embedding from the model. + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The model name to use with the +format model=models/{model}. +
+`text` + +`str` + +Optional. The free-form input text that the +model will turn into an embedding. +
+ + + diff --git a/docs/api/google/generativeai/protos/EmbedTextResponse.md b/docs/api/google/generativeai/protos/EmbedTextResponse.md new file mode 100644 index 000000000..adaf5be7d --- /dev/null +++ b/docs/api/google/generativeai/protos/EmbedTextResponse.md @@ -0,0 +1,50 @@ +description: The response to a EmbedTextRequest. + +
+ + +
+ +# google.generativeai.protos.EmbedTextResponse + + + + + + + + + +The response to a EmbedTextRequest. + + + + + + + + + + + + + + + +
+`embedding` + +`google.ai.generativelanguage.Embedding` + +Output only. The embedding generated from the +input text. + +
+ + + diff --git a/docs/api/google/generativeai/protos/Embedding.md b/docs/api/google/generativeai/protos/Embedding.md new file mode 100644 index 000000000..7a8f70006 --- /dev/null +++ b/docs/api/google/generativeai/protos/Embedding.md @@ -0,0 +1,48 @@ +description: A list of floats representing the embedding. + +
+ + +
+ +# google.generativeai.protos.Embedding + + + + + + + + + +A list of floats representing the embedding. + + + + + + + + + + + + + + + +
+`value` + +`MutableSequence[float]` + +The embedding values. +
+ + + diff --git a/docs/api/google/generativeai/protos/Example.md b/docs/api/google/generativeai/protos/Example.md new file mode 100644 index 000000000..59aecebe1 --- /dev/null +++ b/docs/api/google/generativeai/protos/Example.md @@ -0,0 +1,60 @@ +description: An input/output example used to instruct the Model. + +
+ + +
+ +# google.generativeai.protos.Example + + + + + + + + + +An input/output example used to instruct the Model. + + + +It demonstrates how the model should respond or format its +response. + + + + + + + + + + + + + + + +
+`input` + +`google.ai.generativelanguage.Message` + +Required. An example of an input ``Message`` from the user. +
+`output` + +`google.ai.generativelanguage.Message` + +Required. An example of what the model should +output given the input. +
+ + + diff --git a/docs/api/google/generativeai/protos/ExecutableCode.md b/docs/api/google/generativeai/protos/ExecutableCode.md new file mode 100644 index 000000000..cb75a47b8 --- /dev/null +++ b/docs/api/google/generativeai/protos/ExecutableCode.md @@ -0,0 +1,64 @@ +description: Code generated by the model that is meant to be executed, and the result returned to the model. + +
+ + + +
+ +# google.generativeai.protos.ExecutableCode + + + + + + + + + +Code generated by the model that is meant to be executed, and the result returned to the model. + + + +Only generated when using the ``CodeExecution`` tool, in which the +code will be automatically executed, and a corresponding +``CodeExecutionResult`` will also be generated. + + + + + + + + + + + + + + + +
+`language` + +`google.ai.generativelanguage.ExecutableCode.Language` + +Required. Programming language of the ``code``. +
+`code` + +`str` + +Required. The code to be executed. +
+ + + +## Child Classes +[`class Language`](../../../google/generativeai/protos/ExecutableCode/Language.md) + diff --git a/docs/api/google/generativeai/protos/ExecutableCode/Language.md b/docs/api/google/generativeai/protos/ExecutableCode/Language.md new file mode 100644 index 000000000..23184a73a --- /dev/null +++ b/docs/api/google/generativeai/protos/ExecutableCode/Language.md @@ -0,0 +1,663 @@ +description: Supported programming languages for the generated code. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.ExecutableCode.Language + + + + + + + + + +Supported programming languages for the generated code. + + + + + + + + + + + + + + + + + + + + +
+`LANGUAGE_UNSPECIFIED` + +`0` + +Unspecified language. This value should not +be used. +
+`PYTHON` + +`1` + +Python >= 3.10, with numpy and simpy +available. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +
 
__lt__
 
+ + + +Return self<value. + + +
 
__mod__
 
+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +
 
__rmod__
 
+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + +
+LANGUAGE_UNSPECIFIED + +`` +
+PYTHON + +`` +
+ diff --git a/docs/api/google/generativeai/protos/File.md b/docs/api/google/generativeai/protos/File.md new file mode 100644 index 000000000..71a2df334 --- /dev/null +++ b/docs/api/google/generativeai/protos/File.md @@ -0,0 +1,164 @@ +description: A file uploaded to the API. + +
+ + + +
+ +# google.generativeai.protos.File + + + + + + + + + +A file uploaded to the API. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`video_metadata` + +`google.ai.generativelanguage.VideoMetadata` + +Output only. Metadata for a video. + +This field is a member of `oneof`_ ``metadata``. +
+`name` + +`str` + +Immutable. Identifier. The ``File`` resource name. The ID +(name excluding the "files/" prefix) can contain up to 40 +characters that are lowercase alphanumeric or dashes (-). +The ID cannot start or end with a dash. If the name is empty +on create, a unique name will be generated. Example: +``files/123-456`` +
+`display_name` + +`str` + +Optional. The human-readable display name for the ``File``. +The display name must be no more than 512 characters in +length, including spaces. Example: "Welcome Image". +
+`mime_type` + +`str` + +Output only. MIME type of the file. +
+`size_bytes` + +`int` + +Output only. Size of the file in bytes. +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp of when the ``File`` was created. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp of when the ``File`` was last +updated. +
+`expiration_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp of when the ``File`` will be +deleted. Only set if the ``File`` is scheduled to expire. +
+`sha256_hash` + +`bytes` + +Output only. SHA-256 hash of the uploaded +bytes. +
+`uri` + +`str` + +Output only. The uri of the ``File``. +
+`state` + +`google.ai.generativelanguage.File.State` + +Output only. Processing state of the File. +
+`error` + +`google.rpc.status_pb2.Status` + +Output only. Error status if File processing +failed. +
+ + + +## Child Classes +[`class State`](../../../google/generativeai/protos/File/State.md) + diff --git a/docs/api/google/generativeai/protos/File/State.md b/docs/api/google/generativeai/protos/File/State.md new file mode 100644 index 000000000..d515f5512 --- /dev/null +++ b/docs/api/google/generativeai/protos/File/State.md @@ -0,0 +1,698 @@ +description: States for the lifecycle of a File. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.File.State + + + + + + + + + +States for the lifecycle of a File. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`STATE_UNSPECIFIED` + +`0` + +The default value. This value is used if the +state is omitted. +
+`PROCESSING` + +`1` + +File is being processed and cannot be used +for inference yet. +
+`ACTIVE` + +`2` + +File is processed and available for +inference. +
+`FAILED` + +`10` + +File failed processing. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+ACTIVE + +`` +
+FAILED + +`` +
+PROCESSING + +`` +
+STATE_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/FileData.md b/docs/api/google/generativeai/protos/FileData.md new file mode 100644 index 000000000..d5c4678b5 --- /dev/null +++ b/docs/api/google/generativeai/protos/FileData.md @@ -0,0 +1,58 @@ +description: URI based data. + +
+ + +
+ +# google.generativeai.protos.FileData + + + + + + + + + +URI based data. + + + + + + + + + + + + + + + + + + +
+`mime_type` + +`str` + +Optional. The IANA standard MIME type of the +source data. +
+`file_uri` + +`str` + +Required. URI. +
+ + + diff --git a/docs/api/google/generativeai/protos/FunctionCall.md b/docs/api/google/generativeai/protos/FunctionCall.md new file mode 100644 index 000000000..68132319b --- /dev/null +++ b/docs/api/google/generativeai/protos/FunctionCall.md @@ -0,0 +1,62 @@ +description: A predicted FunctionCall returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. + +
+ + +
+ +# google.generativeai.protos.FunctionCall + + + + + + + + + +A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values. + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the function to call. +Must be a-z, A-Z, 0-9, or contain underscores +and dashes, with a maximum length of 63. +
+`args` + +`google.protobuf.struct_pb2.Struct` + +Optional. The function parameters and values +in JSON object format. + +
+ + + diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig.md b/docs/api/google/generativeai/protos/FunctionCallingConfig.md new file mode 100644 index 000000000..f4262e213 --- /dev/null +++ b/docs/api/google/generativeai/protos/FunctionCallingConfig.md @@ -0,0 +1,69 @@ +description: Configuration for specifying function calling behavior. + +
+ + + +
+ +# google.generativeai.protos.FunctionCallingConfig + + + + + + + + + +Configuration for specifying function calling behavior. + + + + + + + + + + + + + + + + + + +
+`mode` + +`google.ai.generativelanguage.FunctionCallingConfig.Mode` + +Optional. Specifies the mode in which +function calling should execute. If unspecified, +the default value will be set to AUTO. +
+`allowed_function_names` + +`MutableSequence[str]` + +Optional. A set of function names that, when provided, +limits the functions the model will call. + +This should only be set when the Mode is ANY. Function names +should match [FunctionDeclaration.name]. With mode set to +ANY, model will predict a function call from the set of +function names provided. +
+ + + +## Child Classes +[`class Mode`](../../../google/generativeai/protos/FunctionCallingConfig/Mode.md) + diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md new file mode 100644 index 000000000..1a3483e8e --- /dev/null +++ b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md @@ -0,0 +1,704 @@ +description: Defines the execution behavior for function calling by defining the execution mode. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.FunctionCallingConfig.Mode + + + + + + + + + +Defines the execution behavior for function calling by defining the execution mode. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`MODE_UNSPECIFIED` + +`0` + +Unspecified function calling mode. This value +should not be used. +
+`AUTO` + +`1` + +Default model behavior, model decides to +predict either a function call or a natural +language response. +
+`ANY` + +`2` + +Model is constrained to always predicting a function call +only. If "allowed_function_names" are set, the predicted +function call will be limited to any one of +"allowed_function_names", else the predicted function call +will be any one of the provided "function_declarations". +
+`NONE` + +`3` + +Model will not predict any function call. +Model behavior is same as when not passing any +function declarations. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+ANY + +`` +
+AUTO + +`` +
+MODE_UNSPECIFIED + +`` +
+NONE + +`` +
+ diff --git a/docs/api/google/generativeai/protos/FunctionDeclaration.md b/docs/api/google/generativeai/protos/FunctionDeclaration.md new file mode 100644 index 000000000..62bc81229 --- /dev/null +++ b/docs/api/google/generativeai/protos/FunctionDeclaration.md @@ -0,0 +1,80 @@ +description: Structured representation of a function declaration as defined by the OpenAPI 3.03 specification __. + +
+ + +
+ +# google.generativeai.protos.FunctionDeclaration + + + + + + + + + +Structured representation of a function declaration as defined by the `OpenAPI 3.03 specification `__. + + + Included in +this declaration are the function name and parameters. This +FunctionDeclaration is a representation of a block of code that can +be used as a ``Tool`` by the model and executed by the client. + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the function. +Must be a-z, A-Z, 0-9, or contain underscores +and dashes, with a maximum length of 63. +
+`description` + +`str` + +Required. A brief description of the +function. +
+`parameters` + +`google.ai.generativelanguage.Schema` + +Optional. Describes the parameters to this +function. Reflects the Open API 3.03 Parameter +Object string Key: the name of the parameter. +Parameter names are case sensitive. Schema +Value: the Schema defining the type used for the +parameter. + +
+ + + diff --git a/docs/api/google/generativeai/protos/FunctionResponse.md b/docs/api/google/generativeai/protos/FunctionResponse.md new file mode 100644 index 000000000..024421e79 --- /dev/null +++ b/docs/api/google/generativeai/protos/FunctionResponse.md @@ -0,0 +1,61 @@ +description: The result output from a FunctionCall that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. + +
+ + +
+ +# google.generativeai.protos.FunctionResponse + + + + + + + + + +The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model. + + + This should contain the result of a\ ``FunctionCall`` +made based on model prediction. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the function to call. +Must be a-z, A-Z, 0-9, or contain underscores +and dashes, with a maximum length of 63. +
+`response` + +`google.protobuf.struct_pb2.Struct` + +Required. The function response in JSON +object format. +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md new file mode 100644 index 000000000..b06a2302b --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md @@ -0,0 +1,151 @@ +description: Request to generate a grounded answer from the model. + +
+ + + +
+ +# google.generativeai.protos.GenerateAnswerRequest + + + + + + + + + +Request to generate a grounded answer from the model. + + + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`inline_passages` + +`google.ai.generativelanguage.GroundingPassages` + +Passages provided inline with the request. + +This field is a member of `oneof`_ ``grounding_source``. +
+`semantic_retriever` + +`google.ai.generativelanguage.SemanticRetrieverConfig` + +Content retrieved from resources created via +the Semantic Retriever API. + +This field is a member of `oneof`_ ``grounding_source``. +
+`model` + +`str` + +Required. The name of the ``Model`` to use for generating +the grounded response. + +Format: ``model=models/{model}``. +
+`contents` + +`MutableSequence[google.ai.generativelanguage.Content]` + +Required. The content of the current conversation with the +model. For single-turn queries, this is a single question to +answer. For multi-turn queries, this is a repeated field +that contains conversation history and the last ``Content`` +in the list containing the question. + +Note: GenerateAnswer currently only supports queries in +English. +
+`answer_style` + +`google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle` + +Required. Style in which answers should be +returned. +
+`safety_settings` + +`MutableSequence[google.ai.generativelanguage.SafetySetting]` + +Optional. A list of unique ``SafetySetting`` instances for +blocking unsafe content. + +This will be enforced on the +GenerateAnswerRequest.contents and +``GenerateAnswerResponse.candidate``. There should not be +more than one setting for each ``SafetyCategory`` type. The +API will block any contents and responses that fail to meet +the thresholds set by these settings. This list overrides +the default settings for each ``SafetyCategory`` specified +in the safety_settings. If there is no ``SafetySetting`` for +a given ``SafetyCategory`` provided in the list, the API +will use the default safety setting for that category. Harm +categories HARM_CATEGORY_HATE_SPEECH, +HARM_CATEGORY_SEXUALLY_EXPLICIT, +HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT +are supported. +
+`temperature` + +`float` + +Optional. Controls the randomness of the output. + +Values can range from [0.0,1.0], inclusive. A value closer +to 1.0 will produce responses that are more varied and +creative, while a value closer to 0.0 will typically result +in more straightforward responses from the model. A low +temperature (~0.2) is usually recommended for +Attributed-Question-Answering use cases. + +
+ + + +## Child Classes +[`class AnswerStyle`](../../../google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md) + diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md new file mode 100644 index 000000000..242d337dd --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md @@ -0,0 +1,698 @@ +description: Style for grounded answers. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.GenerateAnswerRequest.AnswerStyle + + + + + + + + + +Style for grounded answers. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`ANSWER_STYLE_UNSPECIFIED` + +`0` + +Unspecified answer style. +
+`ABSTRACTIVE` + +`1` + +Succinct but abstract style. +
+`EXTRACTIVE` + +`2` + +Very brief and extractive style. +
+`VERBOSE` + +`3` + +Verbose style including extra details. The +response may be formatted as a sentence, +paragraph, multiple paragraphs, or bullet +points, etc. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+ABSTRACTIVE + +`` +
+ANSWER_STYLE_UNSPECIFIED + +`` +
+EXTRACTIVE + +`` +
+VERBOSE + +`` +
+ diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md new file mode 100644 index 000000000..e4f1ace8c --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md @@ -0,0 +1,104 @@ +description: Response from the model for a grounded answer. + +
+ + + +
+ +# google.generativeai.protos.GenerateAnswerResponse + + + + + + + + + +Response from the model for a grounded answer. + + + + + + + + + + + + + + + + + + + + + +
+`answer` + +`google.ai.generativelanguage.Candidate` + +Candidate answer from the model. + +Note: The model *always* attempts to provide a grounded +answer, even when the answer is unlikely to be answerable +from the given passages. In that case, a low-quality or +ungrounded answer may be provided, along with a low +``answerable_probability``. +
+`answerable_probability` + +`float` + +Output only. The model's estimate of the probability that +its answer is correct and grounded in the input passages. + +A low answerable_probability indicates that the answer might +not be grounded in the sources. + +When ``answerable_probability`` is low, some clients may +wish to: + +- Display a message to the effect of "We couldn’t answer + that question" to the user. +- Fall back to a general-purpose LLM that answers the + question from world knowledge. The threshold and nature + of such fallbacks will depend on individual clients’ use + cases. 0.5 is a good starting threshold. + +
+`input_feedback` + +`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback` + +Output only. Feedback related to the input data used to +answer the question, as opposed to model-generated response +to the question. + +"Input data" can be one or more of the following: + +- Question specified by the last entry in + ``GenerateAnswerRequest.content`` +- Conversation history specified by the other entries in + ``GenerateAnswerRequest.content`` +- Grounding sources + (GenerateAnswerRequest.semantic_retriever or + GenerateAnswerRequest.inline_passages) + +
+ + + +## Child Classes +[`class InputFeedback`](../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md) + diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md new file mode 100644 index 000000000..a987bd401 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md @@ -0,0 +1,65 @@ +description: Feedback related to the input data used to answer the question, as opposed to model-generated response to the question. + +
+ + + +
+ +# google.generativeai.protos.GenerateAnswerResponse.InputFeedback + + + + + + + + + +Feedback related to the input data used to answer the question, as opposed to model-generated response to the question. + + + + + + + + + + + + + + + + + + + +
+`block_reason` + +`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback.BlockReason` + +Optional. If set, the input was blocked and +no candidates are returned. Rephrase your input. + +
+`safety_ratings` + +`MutableSequence[google.ai.generativelanguage.SafetyRating]` + +Ratings for safety of the input. +There is at most one rating per category. +
+ + + +## Child Classes +[`class BlockReason`](../../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md) + diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md new file mode 100644 index 000000000..f0141fdf4 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md @@ -0,0 +1,680 @@ +description: Specifies what was the reason why input was blocked. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason + + + + + + + + + +Specifies what was the reason why input was blocked. + + + + + + + + + + + + + + + + + + + + + + + +
+`BLOCK_REASON_UNSPECIFIED` + +`0` + +Default value. This value is unused. +
+`SAFETY` + +`1` + +Input was blocked due to safety reasons. You can inspect +``safety_ratings`` to understand which safety category +blocked it. +
+`OTHER` + +`2` + +Input was blocked due to other reasons. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + +
+BLOCK_REASON_UNSPECIFIED + +`` +
+OTHER + +`` +
+SAFETY + +`` +
+ diff --git a/docs/api/google/generativeai/protos/GenerateContentRequest.md b/docs/api/google/generativeai/protos/GenerateContentRequest.md new file mode 100644 index 000000000..75ec43867 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateContentRequest.md @@ -0,0 +1,151 @@ +description: Request to generate a completion from the model. + +
+ + +
+ +# google.generativeai.protos.GenerateContentRequest + + + + + + + + + +Request to generate a completion from the model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The name of the ``Model`` to use for generating +the completion. + +Format: ``name=models/{model}``. +
+`system_instruction` + +`google.ai.generativelanguage.Content` + +Optional. Developer set system instruction. +Currently, text only. + +
+`contents` + +`MutableSequence[google.ai.generativelanguage.Content]` + +Required. The content of the current +conversation with the model. +For single-turn queries, this is a single +instance. For multi-turn queries, this is a +repeated field that contains conversation +history + latest request. +
+`tools` + +`MutableSequence[google.ai.generativelanguage.Tool]` + +Optional. A list of ``Tools`` the model may use to generate +the next response. + +A ``Tool`` is a piece of code that enables the system to +interact with external systems to perform an action, or set +of actions, outside of knowledge and scope of the model. The +only supported tool is currently ``Function``. +
+`tool_config` + +`google.ai.generativelanguage.ToolConfig` + +Optional. Tool configuration for any ``Tool`` specified in +the request. +
+`safety_settings` + +`MutableSequence[google.ai.generativelanguage.SafetySetting]` + +Optional. A list of unique ``SafetySetting`` instances for +blocking unsafe content. + +This will be enforced on the +GenerateContentRequest.contents and +GenerateContentResponse.candidates. There should not be +more than one setting for each ``SafetyCategory`` type. The +API will block any contents and responses that fail to meet +the thresholds set by these settings. This list overrides +the default settings for each ``SafetyCategory`` specified +in the safety_settings. If there is no ``SafetySetting`` for +a given ``SafetyCategory`` provided in the list, the API +will use the default safety setting for that category. Harm +categories HARM_CATEGORY_HATE_SPEECH, +HARM_CATEGORY_SEXUALLY_EXPLICIT, +HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT +are supported. +
+`generation_config` + +`google.ai.generativelanguage.GenerationConfig` + +Optional. Configuration options for model +generation and outputs. + +
+`cached_content` + +`str` + +Optional. The name of the cached content used as context to +serve the prediction. Note: only used in explicit caching, +where users can have control over caching (e.g. what content +to cache) and enjoy guaranteed cost savings. Format: +``cachedContents/{cachedContent}`` + +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse.md b/docs/api/google/generativeai/protos/GenerateContentResponse.md new file mode 100644 index 000000000..b7b2172a6 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateContentResponse.md @@ -0,0 +1,86 @@ +description: Response from the model supporting multiple candidates. + +
+ + + + +
+ +# google.generativeai.protos.GenerateContentResponse + + + + + + + + + +Response from the model supporting multiple candidates. + + + +Note on safety ratings and content filtering. They are reported for +both prompt in GenerateContentResponse.prompt_feedback and for +each candidate in ``finish_reason`` and in ``safety_ratings``. The +API contract is that: + +- either all requested candidates are returned or no candidates at + all +- no candidates are returned only if there was something wrong with + the prompt (see ``prompt_feedback``) +- feedback on each candidate is reported on ``finish_reason`` and + ``safety_ratings``. + + + + + + + + + + + + + + + + + + +
+`candidates` + +`MutableSequence[google.ai.generativelanguage.Candidate]` + +Candidate responses from the model. +
+`prompt_feedback` + +`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback` + +Returns the prompt's feedback related to the +content filters. +
+`usage_metadata` + +`google.ai.generativelanguage.GenerateContentResponse.UsageMetadata` + +Output only. Metadata on the generation +requests' token usage. +
+ + + +## Child Classes +[`class PromptFeedback`](../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback.md) + +[`class UsageMetadata`](../../../google/generativeai/protos/GenerateContentResponse/UsageMetadata.md) + diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md new file mode 100644 index 000000000..f953cebc6 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md @@ -0,0 +1,64 @@ +description: A set of the feedback metadata the prompt specified in GenerateContentRequest.content. + +
+ + + +
+ +# google.generativeai.protos.GenerateContentResponse.PromptFeedback + + + + + + + + + +A set of the feedback metadata the prompt specified in ``GenerateContentRequest.content``. + + + + + + + + + + + + + + + + + + +
+`block_reason` + +`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback.BlockReason` + +Optional. If set, the prompt was blocked and +no candidates are returned. Rephrase your +prompt. +
+`safety_ratings` + +`MutableSequence[google.ai.generativelanguage.SafetyRating]` + +Ratings for safety of the prompt. +There is at most one rating per category. +
+ + + +## Child Classes +[`class BlockReason`](../../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md) + diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md new file mode 100644 index 000000000..bd62879f0 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md @@ -0,0 +1,680 @@ +description: Specifies what was the reason why prompt was blocked. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason + + + + + + + + + +Specifies what was the reason why prompt was blocked. + + + + + + + + + + + + + + + + + + + + + + + +
+`BLOCK_REASON_UNSPECIFIED` + +`0` + +Default value. This value is unused. +
+`SAFETY` + +`1` + +Prompt was blocked due to safety reasons. You can inspect +``safety_ratings`` to understand which safety category +blocked it. +
+`OTHER` + +`2` + +Prompt was blocked due to unknown reasons. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + +
+BLOCK_REASON_UNSPECIFIED + +`` +
+OTHER + +`` +
+SAFETY + +`` +
+ diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md new file mode 100644 index 000000000..8a5c0e431 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md @@ -0,0 +1,80 @@ +description: Metadata on the generation request's token usage. + +
+ + +
+ +# google.generativeai.protos.GenerateContentResponse.UsageMetadata + + + + + + + + + +Metadata on the generation request's token usage. + + + + + + + + + + + + + + + + + + + + + + + + +
+`prompt_token_count` + +`int` + +Number of tokens in the prompt. When cached_content is set, +this is still the total effective prompt size. I.e. this +includes the number of tokens in the cached content. +
+`cached_content_token_count` + +`int` + +Number of tokens in the cached part of the +prompt, i.e. in the cached content. +
+`candidates_token_count` + +`int` + +Total number of tokens across the generated +candidates. +
+`total_token_count` + +`int` + +Total token count for the generation request +(prompt + candidates). +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateMessageRequest.md b/docs/api/google/generativeai/protos/GenerateMessageRequest.md new file mode 100644 index 000000000..9884bf972 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateMessageRequest.md @@ -0,0 +1,124 @@ +description: Request to generate a message response from the model. + +
+ + +
+ +# google.generativeai.protos.GenerateMessageRequest + + + + + + + + + +Request to generate a message response from the model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The name of the model to use. + +Format: ``name=models/{model}``. +
+`prompt` + +`google.ai.generativelanguage.MessagePrompt` + +Required. The structured textual input given +to the model as a prompt. +Given a +prompt, the model will return what it predicts +is the next message in the discussion. +
+`temperature` + +`float` + +Optional. Controls the randomness of the output. + +Values can range over ``[0.0,1.0]``, inclusive. A value +closer to ``1.0`` will produce responses that are more +varied, while a value closer to ``0.0`` will typically +result in less surprising responses from the model. + +
+`candidate_count` + +`int` + +Optional. The number of generated response messages to +return. + +This value must be between ``[1, 8]``, inclusive. If unset, +this will default to ``1``. + +
+`top_p` + +`float` + +Optional. The maximum cumulative probability of tokens to +consider when sampling. + +The model uses combined Top-k and nucleus sampling. + +Nucleus sampling considers the smallest set of tokens whose +probability sum is at least ``top_p``. + +
+`top_k` + +`int` + +Optional. The maximum number of tokens to consider when +sampling. + +The model uses combined Top-k and nucleus sampling. + +Top-k sampling considers the set of ``top_k`` most probable +tokens. + +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateMessageResponse.md b/docs/api/google/generativeai/protos/GenerateMessageResponse.md new file mode 100644 index 000000000..91a07ac42 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateMessageResponse.md @@ -0,0 +1,75 @@ +description: The response from the model. + +
+ + +
+ +# google.generativeai.protos.GenerateMessageResponse + + + + + + + + + +The response from the model. + + + +This includes candidate messages and +conversation history in the form of chronologically-ordered +messages. + + + + + + + + + + + + + + + + + + +
+`candidates` + +`MutableSequence[google.ai.generativelanguage.Message]` + +Candidate response messages from the model. +
+`messages` + +`MutableSequence[google.ai.generativelanguage.Message]` + +The conversation history used by the model. +
+`filters` + +`MutableSequence[google.ai.generativelanguage.ContentFilter]` + +A set of content filtering metadata for the prompt and +response text. + +This indicates which ``SafetyCategory``\ (s) blocked a +candidate from this response, the lowest ``HarmProbability`` +that triggered a block, and the HarmThreshold setting for +that category. +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateTextRequest.md b/docs/api/google/generativeai/protos/GenerateTextRequest.md new file mode 100644 index 000000000..05d08c518 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateTextRequest.md @@ -0,0 +1,189 @@ +description: Request to generate a text completion response from the model. + +
+ + +
+ +# google.generativeai.protos.GenerateTextRequest + + + + + + + + + +Request to generate a text completion response from the model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`model` + +`str` + +Required. The name of the ``Model`` or ``TunedModel`` to use +for generating the completion. Examples: +models/text-bison-001 tunedModels/sentence-translator-u3b7m +
+`prompt` + +`google.ai.generativelanguage.TextPrompt` + +Required. The free-form input text given to +the model as a prompt. +Given a prompt, the model will generate a +TextCompletion response it predicts as the +completion of the input text. +
+`temperature`
+
+`float`
+
+Optional. Controls the randomness of the output. Note: The
+default value varies by model, see the Model.temperature
+attribute of the ``Model`` returned by the ``getModel``
+function.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model.
+
+`candidate_count` + +`int` + +Optional. Number of generated responses to return. + +This value must be between [1, 8], inclusive. If unset, this +will default to 1. + +
+`max_output_tokens` + +`int` + +Optional. The maximum number of tokens to include in a +candidate. + +If unset, this will default to output_token_limit specified +in the ``Model`` specification. + +
+`top_p`
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model, see the
+Model.top_p attribute of the ``Model`` returned by the
+``getModel`` function.
+
+`top_k`
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. Defaults to 40.
+
+Note: The default value varies by model, see the
+Model.top_k attribute of the ``Model`` returned by the
+``getModel`` function.
+
+`safety_settings`
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+This will be enforced on the GenerateTextRequest.prompt
+and GenerateTextResponse.candidates. There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any prompts and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
+HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
+HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported
+in text service.
+`stop_sequences` + +`MutableSequence[str]` + +The set of character sequences (up to 5) that +will stop output generation. If specified, the +API will stop at the first appearance of a stop +sequence. The stop sequence will not be included +as part of the response. +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerateTextResponse.md b/docs/api/google/generativeai/protos/GenerateTextResponse.md new file mode 100644 index 000000000..16ba38748 --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerateTextResponse.md @@ -0,0 +1,78 @@ +description: The response from the model, including candidate completions. + +
+ + +
+ +# google.generativeai.protos.GenerateTextResponse + + + + + + + + + +The response from the model, including candidate completions. + + + + + + + + + + + + + + + + + + + + + +
+`candidates` + +`MutableSequence[google.ai.generativelanguage.TextCompletion]` + +Candidate responses from the model. +
+`filters` + +`MutableSequence[google.ai.generativelanguage.ContentFilter]` + +A set of content filtering metadata for the prompt and +response text. + +This indicates which ``SafetyCategory``\ (s) blocked a +candidate from this response, the lowest ``HarmProbability`` +that triggered a block, and the HarmThreshold setting for +that category. This indicates the smallest change to the +``SafetySettings`` that would be necessary to unblock at +least 1 response. + +The blocking is configured by the ``SafetySettings`` in the +request (or the default ``SafetySettings`` of the API). +
+`safety_feedback` + +`MutableSequence[google.ai.generativelanguage.SafetyFeedback]` + +Returns any safety feedback related to +content filtering. +
+ + + diff --git a/docs/api/google/generativeai/protos/GenerationConfig.md b/docs/api/google/generativeai/protos/GenerationConfig.md new file mode 100644 index 000000000..33a0b88bc --- /dev/null +++ b/docs/api/google/generativeai/protos/GenerationConfig.md @@ -0,0 +1,172 @@ +description: Configuration options for model generation and outputs. + +
+ + +
+ +# google.generativeai.protos.GenerationConfig + + + + + + + + + +Configuration options for model generation and outputs. + + + Not +all parameters may be configurable for every model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`candidate_count` + +`int` + +Optional. Number of generated responses to +return. +Currently, this value can only be set to 1. If +unset, this will default to 1. + +
+`stop_sequences` + +`MutableSequence[str]` + +Optional. The set of character sequences (up +to 5) that will stop output generation. If +specified, the API will stop at the first +appearance of a stop sequence. The stop sequence +will not be included as part of the response. +
+`max_output_tokens` + +`int` + +Optional. The maximum number of tokens to include in a +candidate. + +Note: The default value varies by model, see the +Model.output_token_limit attribute of the ``Model`` +returned from the ``getModel`` function. + +
+`temperature` + +`float` + +Optional. Controls the randomness of the output. + +Note: The default value varies by model, see the +Model.temperature attribute of the ``Model`` returned +from the ``getModel`` function. + +Values can range from [0.0, 2.0]. + +
+`top_p` + +`float` + +Optional. The maximum cumulative probability of tokens to +consider when sampling. + +The model uses combined Top-k and nucleus sampling. + +Tokens are sorted based on their assigned probabilities so +that only the most likely tokens are considered. Top-k +sampling directly limits the maximum number of tokens to +consider, while Nucleus sampling limits number of tokens +based on the cumulative probability. + +Note: The default value varies by model, see the +Model.top_p attribute of the ``Model`` returned from the +``getModel`` function. + +
+`top_k` + +`int` + +Optional. The maximum number of tokens to consider when +sampling. + +Models use nucleus sampling or combined Top-k and nucleus +sampling. Top-k sampling considers the set of ``top_k`` most +probable tokens. Models running with nucleus sampling don't +allow top_k setting. + +Note: The default value varies by model, see the +Model.top_k attribute of the ``Model`` returned from the +``getModel`` function. Empty ``top_k`` field in ``Model`` +indicates the model doesn't apply top-k sampling and doesn't +allow setting ``top_k`` on requests. + +
+`response_mime_type` + +`str` + +Optional. Output response mimetype of the generated +candidate text. Supported mimetype: ``text/plain``: +(default) Text output. ``application/json``: JSON response +in the candidates. +
+`response_schema` + +`google.ai.generativelanguage.Schema` + +Optional. Output response schema of the generated candidate +text when response mime type can have schema. Schema can be +objects, primitives or arrays and is a subset of `OpenAPI +schema `__. + +If set, a compatible response_mime_type must also be set. +Compatible mimetypes: ``application/json``: Schema for JSON +response. +
+ + + diff --git a/docs/api/google/generativeai/protos/GetCachedContentRequest.md b/docs/api/google/generativeai/protos/GetCachedContentRequest.md new file mode 100644 index 000000000..c9cab78c1 --- /dev/null +++ b/docs/api/google/generativeai/protos/GetCachedContentRequest.md @@ -0,0 +1,49 @@ +description: Request to read CachedContent. + +
+ + +
+ +# google.generativeai.protos.GetCachedContentRequest + + + + + + + + + +Request to read CachedContent. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name referring to the content cache +entry. Format: ``cachedContents/{id}`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetChunkRequest.md b/docs/api/google/generativeai/protos/GetChunkRequest.md new file mode 100644 index 000000000..575de2729 --- /dev/null +++ b/docs/api/google/generativeai/protos/GetChunkRequest.md @@ -0,0 +1,49 @@ +description: Request for getting information about a specific Chunk. + +
+ + +
+ +# google.generativeai.protos.GetChunkRequest + + + + + + + + + +Request for getting information about a specific ``Chunk``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``Chunk`` to retrieve. Example: +``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetCorpusRequest.md b/docs/api/google/generativeai/protos/GetCorpusRequest.md new file mode 100644 index 000000000..fe4d3092b --- /dev/null +++ b/docs/api/google/generativeai/protos/GetCorpusRequest.md @@ -0,0 +1,49 @@ +description: Request for getting information about a specific Corpus. + +
+ + +
+ +# google.generativeai.protos.GetCorpusRequest + + + + + + + + + +Request for getting information about a specific ``Corpus``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``Corpus``. Example: +``corpora/my-corpus-123`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetDocumentRequest.md b/docs/api/google/generativeai/protos/GetDocumentRequest.md new file mode 100644 index 000000000..1959ea46c --- /dev/null +++ b/docs/api/google/generativeai/protos/GetDocumentRequest.md @@ -0,0 +1,49 @@ +description: Request for getting information about a specific Document. + +
+ + +
+ +# google.generativeai.protos.GetDocumentRequest + + + + + + + + + +Request for getting information about a specific ``Document``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``Document`` to retrieve. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetFileRequest.md b/docs/api/google/generativeai/protos/GetFileRequest.md new file mode 100644 index 000000000..de6b98e50 --- /dev/null +++ b/docs/api/google/generativeai/protos/GetFileRequest.md @@ -0,0 +1,49 @@ +description: Request for GetFile. + +
+ + +
+ +# google.generativeai.protos.GetFileRequest + + + + + + + + + +Request for ``GetFile``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``File`` to get. Example: +``files/abc-123`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetModelRequest.md b/docs/api/google/generativeai/protos/GetModelRequest.md new file mode 100644 index 000000000..de91a7abe --- /dev/null +++ b/docs/api/google/generativeai/protos/GetModelRequest.md @@ -0,0 +1,53 @@ +description: Request for getting information about a specific Model. + +
+ + +
+ +# google.generativeai.protos.GetModelRequest + + + + + + + + + +Request for getting information about a specific Model. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the model. + +This name should match a model name returned by the +``ListModels`` method. + +Format: ``models/{model}`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetPermissionRequest.md b/docs/api/google/generativeai/protos/GetPermissionRequest.md new file mode 100644 index 000000000..dd6850a87 --- /dev/null +++ b/docs/api/google/generativeai/protos/GetPermissionRequest.md @@ -0,0 +1,52 @@ +description: Request for getting information about a specific Permission. + +
+ + +
+ +# google.generativeai.protos.GetPermissionRequest + + + + + + + + + +Request for getting information about a specific ``Permission``. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the permission. + +Formats: +``tunedModels/{tuned_model}/permissions/{permission}`` +``corpora/{corpus}/permissions/{permission}`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GetTunedModelRequest.md b/docs/api/google/generativeai/protos/GetTunedModelRequest.md new file mode 100644 index 000000000..c34e11e1d --- /dev/null +++ b/docs/api/google/generativeai/protos/GetTunedModelRequest.md @@ -0,0 +1,50 @@ +description: Request for getting information about a specific Model. + +
+ + +
+ +# google.generativeai.protos.GetTunedModelRequest + + + + + + + + + +Request for getting information about a specific Model. + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the model. + +Format: ``tunedModels/my-model-id`` +
+ + + diff --git a/docs/api/google/generativeai/protos/GroundingAttribution.md b/docs/api/google/generativeai/protos/GroundingAttribution.md new file mode 100644 index 000000000..553ea8533 --- /dev/null +++ b/docs/api/google/generativeai/protos/GroundingAttribution.md @@ -0,0 +1,59 @@ +description: Attribution for a source that contributed to an answer. + +
+ + +
+ +# google.generativeai.protos.GroundingAttribution + + + + + + + + + +Attribution for a source that contributed to an answer. + + + + + + + + + + + + + + + + + + +
+`source_id` + +`google.ai.generativelanguage.AttributionSourceId` + +Output only. Identifier for the source +contributing to this attribution. +
+`content` + +`google.ai.generativelanguage.Content` + +Grounding source content that makes up this +attribution. +
+ + + diff --git a/docs/api/google/generativeai/protos/GroundingPassage.md b/docs/api/google/generativeai/protos/GroundingPassage.md new file mode 100644 index 000000000..f63c2e18a --- /dev/null +++ b/docs/api/google/generativeai/protos/GroundingPassage.md @@ -0,0 +1,58 @@ +description: Passage included inline with a grounding configuration. + +
+ + +
+ +# google.generativeai.protos.GroundingPassage + + + + + + + + + +Passage included inline with a grounding configuration. + + + + + + + + + + + + + + + + + + +
+`id` + +`str` + +Identifier for the passage for attributing +this passage in grounded answers. +
+`content` + +`google.ai.generativelanguage.Content` + +Content of the passage. +
+ + + diff --git a/docs/api/google/generativeai/protos/GroundingPassages.md b/docs/api/google/generativeai/protos/GroundingPassages.md new file mode 100644 index 000000000..1cde963d3 --- /dev/null +++ b/docs/api/google/generativeai/protos/GroundingPassages.md @@ -0,0 +1,48 @@ +description: A repeated list of passages. + +
+ + +
+ +# google.generativeai.protos.GroundingPassages + + + + + + + + + +A repeated list of passages. + + + + + + + + + + + + + + + +
+`passages` + +`MutableSequence[google.ai.generativelanguage.GroundingPassage]` + +List of passages. +
+ + + diff --git a/docs/api/google/generativeai/protos/HarmCategory.md b/docs/api/google/generativeai/protos/HarmCategory.md new file mode 100644 index 000000000..54f1b3767 --- /dev/null +++ b/docs/api/google/generativeai/protos/HarmCategory.md @@ -0,0 +1,822 @@ +description: The category of a rating. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.HarmCategory + + + + + + + + + +The category of a rating. + + + + + + + +These categories cover various kinds of harms that developers +may wish to adjust. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`HARM_CATEGORY_UNSPECIFIED` + +`0` + +Category is unspecified. +
+`HARM_CATEGORY_DEROGATORY` + +`1` + +Negative or harmful comments targeting +identity and/or protected attribute. +
+`HARM_CATEGORY_TOXICITY` + +`2` + +Content that is rude, disrespectful, or +profane. +
+`HARM_CATEGORY_VIOLENCE` + +`3` + +Describes scenarios depicting violence +against an individual or group, or general +descriptions of gore. +
+`HARM_CATEGORY_SEXUAL` + +`4` + +Contains references to sexual acts or other +lewd content. +
+`HARM_CATEGORY_MEDICAL` + +`5` + +Promotes unchecked medical advice. +
+`HARM_CATEGORY_DANGEROUS` + +`6` + +Dangerous content that promotes, facilitates, +or encourages harmful acts. +
+`HARM_CATEGORY_HARASSMENT`
+
+`7`
+
+Harassment content.
+
+`HARM_CATEGORY_HATE_SPEECH` + +`8` + +Hate speech and content. +
+`HARM_CATEGORY_SEXUALLY_EXPLICIT` + +`9` + +Sexually explicit content. +
+`HARM_CATEGORY_DANGEROUS_CONTENT` + +`10` + +Dangerous content. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+
+
+
+Return self<<value.
+
+
+__lt__
+
+
+
+Return self<value.
+
+
+__mod__
+
+
+
+Return self%value.
+
+

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+
+Return value%self.
+
+

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+HARM_CATEGORY_DANGEROUS + +`` +
+HARM_CATEGORY_DANGEROUS_CONTENT + +`` +
+HARM_CATEGORY_DEROGATORY + +`` +
+HARM_CATEGORY_HARASSMENT + +`` +
+HARM_CATEGORY_HATE_SPEECH + +`` +
+HARM_CATEGORY_MEDICAL + +`` +
+HARM_CATEGORY_SEXUAL + +`` +
+HARM_CATEGORY_SEXUALLY_EXPLICIT + +`` +
+HARM_CATEGORY_TOXICITY + +`` +
+HARM_CATEGORY_UNSPECIFIED + +`` +
+HARM_CATEGORY_VIOLENCE + +`` +
+ diff --git a/docs/api/google/generativeai/protos/Hyperparameters.md b/docs/api/google/generativeai/protos/Hyperparameters.md new file mode 100644 index 000000000..f5e17d6ee --- /dev/null +++ b/docs/api/google/generativeai/protos/Hyperparameters.md @@ -0,0 +1,100 @@ +description: Hyperparameters controlling the tuning process. + +
+ + +
+ +# google.generativeai.protos.Hyperparameters + + + + + + + + + +Hyperparameters controlling the tuning process. + + + Read more at +https://ai.google.dev/docs/model_tuning_guidance + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + +
+`learning_rate` + +`float` + +Optional. Immutable. The learning rate +hyperparameter for tuning. If not set, a default +of 0.001 or 0.0002 will be calculated based on +the number of training examples. + +This field is a member of `oneof`_ ``learning_rate_option``. +
+`learning_rate_multiplier` + +`float` + +Optional. Immutable. The learning rate multiplier is used to +calculate a final learning_rate based on the default +(recommended) value. Actual learning rate := +learning_rate_multiplier \* default learning rate Default +learning rate is dependent on base model and dataset size. +If not set, a default of 1.0 will be used. + +This field is a member of `oneof`_ ``learning_rate_option``. +
+`epoch_count` + +`int` + +Immutable. The number of training epochs. An +epoch is one pass through the training data. If +not set, a default of 5 will be used. + +
+`batch_size` + +`int` + +Immutable. The batch size hyperparameter for +tuning. If not set, a default of 4 or 16 will be +used based on the number of training examples. + +
+ + + diff --git a/docs/api/google/generativeai/protos/ListCachedContentsRequest.md b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md new file mode 100644 index 000000000..9d6da05b7 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md @@ -0,0 +1,68 @@ +description: Request to list CachedContents. + +
+ + +
+ +# google.generativeai.protos.ListCachedContentsRequest + + + + + + + + + +Request to list CachedContents. + + + + + + + + + + + + + + + + + + +
+`page_size` + +`int` + +Optional. The maximum number of cached +contents to return. The service may return fewer +than this value. If unspecified, some default +(under maximum) number of items will be +returned. The maximum value is 1000; values +above 1000 will be coerced to 1000. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListCachedContents`` call. Provide this to retrieve the +subsequent page. + +When paginating, all other parameters provided to +``ListCachedContents`` must match the call that provided the +page token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListCachedContentsResponse.md b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md new file mode 100644 index 000000000..ba3e10e69 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md @@ -0,0 +1,59 @@ +description: Response with CachedContents list. + +
+ + +
+ +# google.generativeai.protos.ListCachedContentsResponse + + + + + + + + + +Response with CachedContents list. + + + + + + + + + + + + + + + + + + +
+`cached_contents` + +`MutableSequence[google.ai.generativelanguage.CachedContent]` + +List of cached contents. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. If this field is omitted, there are no subsequent +pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListChunksRequest.md b/docs/api/google/generativeai/protos/ListChunksRequest.md new file mode 100644 index 000000000..2150c0630 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListChunksRequest.md @@ -0,0 +1,80 @@ +description: Request for listing Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.ListChunksRequest + + + + + + + + + +Request for listing ``Chunk``\ s. + + + + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The name of the ``Document`` containing +``Chunk``\ s. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`page_size` + +`int` + +Optional. The maximum number of ``Chunk``\ s to return (per +page). The service may return fewer ``Chunk``\ s. + +If unspecified, at most 10 ``Chunk``\ s will be returned. +The maximum size limit is 100 ``Chunk``\ s per page. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListChunks`` call. + +Provide the ``next_page_token`` returned in the response as +an argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListChunks`` must match the call that provided the page +token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListChunksResponse.md b/docs/api/google/generativeai/protos/ListChunksResponse.md new file mode 100644 index 000000000..38178a68e --- /dev/null +++ b/docs/api/google/generativeai/protos/ListChunksResponse.md @@ -0,0 +1,60 @@ +description: Response from ListChunks containing a paginated list of Chunk\ s. + +
+ + +
+ +# google.generativeai.protos.ListChunksResponse + + + + + + + + + +Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s. + + + The ``Chunk``\ s are sorted by ascending +``chunk.create_time``. + + + + + + + + + + + + + + + +
+`chunks` + +`MutableSequence[google.ai.generativelanguage.Chunk]` + +The returned ``Chunk``\ s. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. If this field is omitted, there are no more +pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListCorporaRequest.md b/docs/api/google/generativeai/protos/ListCorporaRequest.md new file mode 100644 index 000000000..d6de5ef51 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListCorporaRequest.md @@ -0,0 +1,69 @@ +description: Request for listing Corpora. + +
+ + +
+ +# google.generativeai.protos.ListCorporaRequest + + + + + + + + + +Request for listing ``Corpora``. + + + + + + + + + + + + + + + + + + +
+`page_size` + +`int` + +Optional. The maximum number of ``Corpora`` to return (per +page). The service may return fewer ``Corpora``. + +If unspecified, at most 10 ``Corpora`` will be returned. The +maximum size limit is 20 ``Corpora`` per page. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListCorpora`` call. + +Provide the ``next_page_token`` returned in the response as +an argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListCorpora`` must match the call that provided the page +token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListCorporaResponse.md b/docs/api/google/generativeai/protos/ListCorporaResponse.md new file mode 100644 index 000000000..8b58310d4 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListCorporaResponse.md @@ -0,0 +1,60 @@ +description: Response from ListCorpora containing a paginated list of Corpora. + +
+ + +
+ +# google.generativeai.protos.ListCorporaResponse + + + + + + + + + +Response from ``ListCorpora`` containing a paginated list of ``Corpora``. + + + The results are sorted by ascending +``corpus.create_time``. + + + + + + + + + + + + + + + +
+`corpora` + +`MutableSequence[google.ai.generativelanguage.Corpus]` + +The returned corpora. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. If this field is omitted, there are no more +pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListDocumentsRequest.md b/docs/api/google/generativeai/protos/ListDocumentsRequest.md new file mode 100644 index 000000000..ce628dfef --- /dev/null +++ b/docs/api/google/generativeai/protos/ListDocumentsRequest.md @@ -0,0 +1,79 @@ +description: Request for listing Document\ s. + +
+ + +
+ +# google.generativeai.protos.ListDocumentsRequest + + + + + + + + + +Request for listing ``Document``\ s. + + + + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The name of the ``Corpus`` containing +``Document``\ s. Example: ``corpora/my-corpus-123`` +
+`page_size` + +`int` + +Optional. The maximum number of ``Document``\ s to return +(per page). The service may return fewer ``Document``\ s. + +If unspecified, at most 10 ``Document``\ s will be returned. +The maximum size limit is 20 ``Document``\ s per page. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListDocuments`` call. + +Provide the ``next_page_token`` returned in the response as +an argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListDocuments`` must match the call that provided the page +token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListDocumentsResponse.md b/docs/api/google/generativeai/protos/ListDocumentsResponse.md new file mode 100644 index 000000000..7d4f7b4c8 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListDocumentsResponse.md @@ -0,0 +1,60 @@ +description: Response from ListDocuments containing a paginated list of Document\ s. + +
+ + +
+ +# google.generativeai.protos.ListDocumentsResponse + + + + + + + + + +Response from ``ListDocuments`` containing a paginated list of ``Document``\ s. + + + The ``Document``\ s are sorted by ascending +``document.create_time``. + + + + + + + + + + + + + + + +
+`documents` + +`MutableSequence[google.ai.generativelanguage.Document]` + +The returned ``Document``\ s. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. If this field is omitted, there are no more +pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListFilesRequest.md b/docs/api/google/generativeai/protos/ListFilesRequest.md new file mode 100644 index 000000000..5b4e3e08e --- /dev/null +++ b/docs/api/google/generativeai/protos/ListFilesRequest.md @@ -0,0 +1,59 @@ +description: Request for ListFiles. + +
+ + +
+ +# google.generativeai.protos.ListFilesRequest + + + + + + + + + +Request for ``ListFiles``. + + + + + + + + + + + + + + + + + + +
+`page_size` + +`int` + +Optional. Maximum number of ``File``\ s to return per page. +If unspecified, defaults to 10. Maximum ``page_size`` is +100. +
+`page_token` + +`str` + +Optional. A page token from a previous ``ListFiles`` call. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListFilesResponse.md b/docs/api/google/generativeai/protos/ListFilesResponse.md new file mode 100644 index 000000000..3f1045ebe --- /dev/null +++ b/docs/api/google/generativeai/protos/ListFilesResponse.md @@ -0,0 +1,58 @@ +description: Response for ListFiles. + +
+ + +
+ +# google.generativeai.protos.ListFilesResponse + + + + + + + + + +Response for ``ListFiles``. + + + + + + + + + + + + + + + + + + +
+`files` + +`MutableSequence[google.ai.generativelanguage.File]` + +The list of ``File``\ s. +
+`next_page_token` + +`str` + +A token that can be sent as a ``page_token`` into a +subsequent ``ListFiles`` call. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListModelsRequest.md b/docs/api/google/generativeai/protos/ListModelsRequest.md new file mode 100644 index 000000000..50095c67b --- /dev/null +++ b/docs/api/google/generativeai/protos/ListModelsRequest.md @@ -0,0 +1,69 @@ +description: Request for listing all Models. + +
+ + +
+ +# google.generativeai.protos.ListModelsRequest + + + + + + + + + +Request for listing all Models. + + + + + + + + + + + + + + + + + + +
+`page_size` + +`int` + +The maximum number of ``Models`` to return (per page). + +The service may return fewer models. If unspecified, at most +50 models will be returned per page. This method returns at +most 1000 models per page, even if you pass a larger +page_size. +
+`page_token` + +`str` + +A page token, received from a previous ``ListModels`` call. + +Provide the ``page_token`` returned by one request as an +argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListModels`` must match the call that provided the page +token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListModelsResponse.md b/docs/api/google/generativeai/protos/ListModelsResponse.md new file mode 100644 index 000000000..062d45370 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListModelsResponse.md @@ -0,0 +1,60 @@ +description: Response from ListModel containing a paginated list of Models. + +
+ + +
+ +# google.generativeai.protos.ListModelsResponse + + + + + + + + + +Response from ``ListModel`` containing a paginated list of Models. + + + + + + + + + + + + + + + + + + +
+`models` + +`MutableSequence[google.ai.generativelanguage.Model]` + +The returned Models. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. + +If this field is omitted, there are no more pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListPermissionsRequest.md b/docs/api/google/generativeai/protos/ListPermissionsRequest.md new file mode 100644 index 000000000..1b25c7132 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListPermissionsRequest.md @@ -0,0 +1,80 @@ +description: Request for listing permissions. + +
+ + +
+ +# google.generativeai.protos.ListPermissionsRequest + + + + + + + + + +Request for listing permissions. + + + + + + + + + + + + + + + + + + + + + +
+`parent` + +`str` + +Required. The parent resource of the permissions. Formats: +``tunedModels/{tuned_model}`` ``corpora/{corpus}`` +
+`page_size` + +`int` + +Optional. The maximum number of ``Permission``\ s to return +(per page). The service may return fewer permissions. + +If unspecified, at most 10 permissions will be returned. +This method returns at most 1000 permissions per page, even +if you pass larger page_size. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListPermissions`` call. + +Provide the ``page_token`` returned by one request as an +argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListPermissions`` must match the call that provided the +page token. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListPermissionsResponse.md b/docs/api/google/generativeai/protos/ListPermissionsResponse.md new file mode 100644 index 000000000..f6e0ace5c --- /dev/null +++ b/docs/api/google/generativeai/protos/ListPermissionsResponse.md @@ -0,0 +1,60 @@ +description: Response from ListPermissions containing a paginated list of permissions. + +
+ + +
+ +# google.generativeai.protos.ListPermissionsResponse + + + + + + + + + +Response from ``ListPermissions`` containing a paginated list of permissions. + + + + + + + + + + + + + + + + + + +
+`permissions` + +`MutableSequence[google.ai.generativelanguage.Permission]` + +Returned permissions. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. + +If this field is omitted, there are no more pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/ListTunedModelsRequest.md b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md new file mode 100644 index 000000000..f8ac44e25 --- /dev/null +++ b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md @@ -0,0 +1,97 @@ +description: Request for listing TunedModels. + +
+ + +
+ +# google.generativeai.protos.ListTunedModelsRequest + + + + + + + + + +Request for listing TunedModels. + + + + + + + + + + + + + + + + + + + + + +
+`page_size` + +`int` + +Optional. The maximum number of ``TunedModels`` to return +(per page). The service may return fewer tuned models. + +If unspecified, at most 10 tuned models will be returned. +This method returns at most 1000 models per page, even if +you pass a larger page_size. +
+`page_token` + +`str` + +Optional. A page token, received from a previous +``ListTunedModels`` call. + +Provide the ``page_token`` returned by one request as an +argument to the next request to retrieve the next page. + +When paginating, all other parameters provided to +``ListTunedModels`` must match the call that provided the +page token. +
+`filter` + +`str` + +Optional. A filter is a full text search over +the tuned model's description and display name. +By default, results will not include tuned +models shared with everyone. + +Additional operators: + + - owner:me + - writers:me + - readers:me + - readers:everyone + +Examples: + + "owner:me" returns all tuned models to which +caller has owner role "readers:me" returns all +tuned models to which caller has reader role +"readers:everyone" returns all tuned models that +are shared with everyone +
+ + + diff --git a/docs/api/google/generativeai/protos/ListTunedModelsResponse.md b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md new file mode 100644 index 000000000..0f247c4dc --- /dev/null +++ b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md @@ -0,0 +1,60 @@ +description: Response from ListTunedModels containing a paginated list of Models. + +
+ + +
+ +# google.generativeai.protos.ListTunedModelsResponse + + + + + + + + + +Response from ``ListTunedModels`` containing a paginated list of Models. + + + + + + + + + + + + + + + + + + +
+`tuned_models` + +`MutableSequence[google.ai.generativelanguage.TunedModel]` + +The returned Models. +
+`next_page_token` + +`str` + +A token, which can be sent as ``page_token`` to retrieve the +next page. + +If this field is omitted, there are no more pages. +
+ + + diff --git a/docs/api/google/generativeai/protos/Message.md b/docs/api/google/generativeai/protos/Message.md new file mode 100644 index 000000000..94d7d17af --- /dev/null +++ b/docs/api/google/generativeai/protos/Message.md @@ -0,0 +1,86 @@ +description: The base unit of structured text. + +
+ + +
+ +# google.generativeai.protos.Message + + + + + + + + + +The base unit of structured text. + + + +A ``Message`` includes an ``author`` and the ``content`` of the +``Message``. + +The ``author`` is used to tag messages when they are fed to the +model as text. + + + + + + + + + + + + + + + + + + + + +
+`author` + +`str` + +Optional. The author of this Message. + +This serves as a key for tagging +the content of this Message when it is fed to +the model as text. + +The author can be any alphanumeric string. +
+`content` + +`str` + +Required. The text content of the structured ``Message``. +
+`citation_metadata` + +`google.ai.generativelanguage.CitationMetadata` + +Output only. Citation information for model-generated +``content`` in this ``Message``. + +If this ``Message`` was generated as output from the model, +this field may be populated with attribution information for +any text included in the ``content``. This field is used +only on output. + +
+ + + diff --git a/docs/api/google/generativeai/protos/MessagePrompt.md b/docs/api/google/generativeai/protos/MessagePrompt.md new file mode 100644 index 000000000..2386e5bb5 --- /dev/null +++ b/docs/api/google/generativeai/protos/MessagePrompt.md @@ -0,0 +1,103 @@ +description: All of the structured input text passed to the model as a prompt. + +
+ + +
+ +# google.generativeai.protos.MessagePrompt + + + + + + + + + +All of the structured input text passed to the model as a prompt. + + + +A ``MessagePrompt`` contains a structured set of fields that provide +context for the conversation, examples of user input/model output +message pairs that prime the model to respond in different ways, and +the conversation history or list of messages representing the +alternating turns of the conversation between the user and the +model. + + + + + + + + + + + + + + + + + + +
+`context` + +`str` + +Optional. Text that should be provided to the model first to +ground the response. + +If not empty, this ``context`` will be given to the model +first before the ``examples`` and ``messages``. When using a +``context`` be sure to provide it with every request to +maintain continuity. + +This field can be a description of your prompt to the model +to help provide context and guide the responses. Examples: +"Translate the phrase from English to French." or "Given a +statement, classify the sentiment as happy, sad or neutral." + +Anything included in this field will take precedence over +message history if the total input size exceeds the model's +``input_token_limit`` and the input request is truncated. +
+`examples` + +`MutableSequence[google.ai.generativelanguage.Example]` + +Optional. Examples of what the model should generate. + +This includes both user input and the response that the +model should emulate. + +These ``examples`` are treated identically to conversation +messages except that they take precedence over the history +in ``messages``: If the total input size exceeds the model's +``input_token_limit`` the input will be truncated. Items +will be dropped from ``messages`` before ``examples``. +
+`messages` + +`MutableSequence[google.ai.generativelanguage.Message]` + +Required. A snapshot of the recent conversation history +sorted chronologically. + +Turns alternate between two authors. + +If the total input size exceeds the model's +``input_token_limit`` the input will be truncated: The +oldest items will be dropped from ``messages``. +
+ + + diff --git a/docs/api/google/generativeai/protos/MetadataFilter.md b/docs/api/google/generativeai/protos/MetadataFilter.md new file mode 100644 index 000000000..447a76665 --- /dev/null +++ b/docs/api/google/generativeai/protos/MetadataFilter.md @@ -0,0 +1,63 @@ +description: User provided filter to limit retrieval based on Chunk or Document level metadata values. + +
+ + +
+ +# google.generativeai.protos.MetadataFilter + + + + + + + + + +User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values. + + + Example (genre = drama OR genre += action): key = "document.custom_metadata.genre" conditions = +[{string_value = "drama", operation = EQUAL}, {string_value = +"action", operation = EQUAL}] + + + + + + + + + + + + + + + +
+`key` + +`str` + +Required. The key of the metadata to filter +on. +
+`conditions` + +`MutableSequence[google.ai.generativelanguage.Condition]` + +Required. The ``Condition``\ s for the given key that will +trigger this filter. Multiple ``Condition``\ s are joined by +logical ORs. +
+ + + diff --git a/docs/api/google/generativeai/protos/Model.md b/docs/api/google/generativeai/protos/Model.md new file mode 100644 index 000000000..c4105e24e --- /dev/null +++ b/docs/api/google/generativeai/protos/Model.md @@ -0,0 +1,193 @@ +description: Information about a Generative Language Model. + +
+ + +
+ +# google.generativeai.protos.Model + + + + + + + + + +Information about a Generative Language Model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the ``Model``. + +Format: ``models/{model}`` with a ``{model}`` naming +convention of: + +- "{base_model_id}-{version}" + +Examples: + +- ``models/chat-bison-001`` +
+`base_model_id` + +`str` + +Required. The name of the base model, pass this to the +generation request. + +Examples: + +- ``chat-bison`` +
+`version` + +`str` + +Required. The version number of the model. + +This represents the major version +
+`display_name` + +`str` + +The human-readable name of the model. E.g. +"Chat Bison". +The name can be up to 128 characters long and +can consist of any UTF-8 characters. +
+`description` + +`str` + +A short description of the model. +
+`input_token_limit` + +`int` + +Maximum number of input tokens allowed for +this model. +
+`output_token_limit` + +`int` + +Maximum number of output tokens available for +this model. +
+`supported_generation_methods` + +`MutableSequence[str]` + +The model's supported generation methods. + +The method names are defined as Pascal case strings, such as +``generateMessage`` which correspond to API methods. +
+`temperature` + +`float` + +Controls the randomness of the output. + +Values can range over ``[0.0,max_temperature]``, inclusive. +A higher value will produce responses that are more varied, +while a value closer to ``0.0`` will typically result in +less surprising responses from the model. This value +specifies default to be used by the backend while making the +call to the model. + +
+`max_temperature` + +`float` + +The maximum temperature this model can use. + +
+`top_p` + +`float` + +For Nucleus sampling. + +Nucleus sampling considers the smallest set of tokens whose +probability sum is at least ``top_p``. This value specifies +default to be used by the backend while making the call to +the model. + +
+`top_k` + +`int` + +For Top-k sampling. + +Top-k sampling considers the set of ``top_k`` most probable +tokens. This value specifies default to be used by the +backend while making the call to the model. If empty, +indicates the model doesn't use top-k sampling, and +``top_k`` isn't allowed as a generation parameter. + +
+ + + diff --git a/docs/api/google/generativeai/protos/Part.md b/docs/api/google/generativeai/protos/Part.md new file mode 100644 index 000000000..24e8ba0a6 --- /dev/null +++ b/docs/api/google/generativeai/protos/Part.md @@ -0,0 +1,136 @@ +description: A datatype containing media that is part of a multi-part Content message. + +
+ + +
+ +# google.generativeai.protos.Part + + + + + + + + + +A datatype containing media that is part of a multi-part ``Content`` message. + + + +A ``Part`` consists of data which has an associated datatype. A +``Part`` can only contain one of the accepted types in +``Part.data``. + +A ``Part`` must have a fixed IANA MIME type identifying the type and +subtype of the media if the ``inline_data`` field is filled with raw +bytes. + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`text` + +`str` + +Inline text. + +This field is a member of `oneof`_ ``data``. +
+`inline_data` + +`google.ai.generativelanguage.Blob` + +Inline media bytes. + +This field is a member of `oneof`_ ``data``. +
+`function_call` + +`google.ai.generativelanguage.FunctionCall` + +A predicted ``FunctionCall`` returned from the model that +contains a string representing the +FunctionDeclaration.name with the arguments and their +values. + +This field is a member of `oneof`_ ``data``. +
+`function_response` + +`google.ai.generativelanguage.FunctionResponse` + +The result output of a ``FunctionCall`` that contains a +string representing the FunctionDeclaration.name and a +structured JSON object containing any output from the +function is used as context to the model. + +This field is a member of `oneof`_ ``data``. +
+`file_data` + +`google.ai.generativelanguage.FileData` + +URI based data. + +This field is a member of `oneof`_ ``data``. +
+`executable_code` + +`google.ai.generativelanguage.ExecutableCode` + +Code generated by the model that is meant to +be executed. + +This field is a member of `oneof`_ ``data``. +
+`code_execution_result` + +`google.ai.generativelanguage.CodeExecutionResult` + +Result of executing the ``ExecutableCode``. + +This field is a member of `oneof`_ ``data``. +
+ + + diff --git a/docs/api/google/generativeai/protos/Permission.md b/docs/api/google/generativeai/protos/Permission.md new file mode 100644 index 000000000..de4165fcb --- /dev/null +++ b/docs/api/google/generativeai/protos/Permission.md @@ -0,0 +1,110 @@ +description: Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. + +
+ + + + +
+ +# google.generativeai.protos.Permission + + + + + + + + + +Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g. + + + a tuned model, +corpus). + +A role is a collection of permitted operations that allows users +to perform specific actions on PaLM API resources. To make them +available to users, groups, or service accounts, you assign +roles. When you assign a role, you grant permissions that the +role contains. + +There are three concentric roles. Each role is a superset of the +previous role's permitted operations: + +- reader can use the resource (e.g. tuned model, corpus) for + inference +- writer has reader's permissions and additionally can edit and + share +- owner has writer's permissions and additionally can delete + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Output only. Identifier. The permission name. A unique name +will be generated on create. Examples: +tunedModels/{tuned_model}/permissions/{permission} +corpora/{corpus}/permissions/{permission} Output only. +
+`grantee_type` + +`google.ai.generativelanguage.Permission.GranteeType` + +Optional. Immutable. The type of the grantee. + +
+`email_address` + +`str` + +Optional. Immutable. The email address of the +user of group which this permission refers. +Field is not set when permission's grantee type +is EVERYONE. + +
+`role` + +`google.ai.generativelanguage.Permission.Role` + +Required. The role granted by this +permission. + +
+ + + +## Child Classes +[`class GranteeType`](../../../google/generativeai/protos/Permission/GranteeType.md) + +[`class Role`](../../../google/generativeai/protos/Permission/Role.md) + diff --git a/docs/api/google/generativeai/protos/Permission/GranteeType.md b/docs/api/google/generativeai/protos/Permission/GranteeType.md new file mode 100644 index 000000000..ef0d1f378 --- /dev/null +++ b/docs/api/google/generativeai/protos/Permission/GranteeType.md @@ -0,0 +1,698 @@ +description: Defines types of the grantee of this permission. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Permission.GranteeType + + + + + + + + + +Defines types of the grantee of this permission. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`GRANTEE_TYPE_UNSPECIFIED` + +`0` + +The default value. This value is unused. +
+`USER` + +`1` + +Represents a user. When set, you must provide email_address +for the user. +
+`GROUP` + +`2` + +Represents a group. When set, you must provide email_address +for the group. +
+`EVERYONE` + +`3` + +Represents access to everyone. No extra +information is required. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+EVERYONE + +`` +
+GRANTEE_TYPE_UNSPECIFIED + +`` +
+GROUP + +`` +
+USER + +`` +
+ diff --git a/docs/api/google/generativeai/protos/Permission/Role.md b/docs/api/google/generativeai/protos/Permission/Role.md new file mode 100644 index 000000000..a59665a55 --- /dev/null +++ b/docs/api/google/generativeai/protos/Permission/Role.md @@ -0,0 +1,697 @@ +description: Defines the role granted by this permission. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Permission.Role + + + + + + + + + +Defines the role granted by this permission. + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`ROLE_UNSPECIFIED` + +`0` + +The default value. This value is unused. +
+`OWNER` + +`1` + +Owner can use, update, share and delete the +resource. +
+`WRITER` + +`2` + +Writer can use, update and share the +resource. +
+`READER` + +`3` + +Reader can use the resource. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+OWNER + +`` +
+READER + +`` +
+ROLE_UNSPECIFIED + +`` +
+WRITER + +`` +
+ diff --git a/docs/api/google/generativeai/protos/QueryCorpusRequest.md b/docs/api/google/generativeai/protos/QueryCorpusRequest.md new file mode 100644 index 000000000..60d839b1f --- /dev/null +++ b/docs/api/google/generativeai/protos/QueryCorpusRequest.md @@ -0,0 +1,109 @@ +description: Request for querying a Corpus. + +
+ + +
+ +# google.generativeai.protos.QueryCorpusRequest + + + + + + + + + +Request for querying a ``Corpus``. + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``Corpus`` to query. Example: +``corpora/my-corpus-123`` +
+`query` + +`str` + +Required. Query string to perform semantic +search. +
+`metadata_filters` + +`MutableSequence[google.ai.generativelanguage.MetadataFilter]` + +Optional. Filter for ``Chunk`` and ``Document`` metadata. +Each ``MetadataFilter`` object should correspond to a unique +key. Multiple ``MetadataFilter`` objects are joined by +logical "AND"s. + +Example query at document level: (year >= 2020 OR year < +2010) AND (genre = drama OR genre = action) + +``MetadataFilter`` object list: metadata_filters = [ {key = +"document.custom_metadata.year" conditions = [{int_value = +2020, operation = GREATER_EQUAL}, {int_value = 2010, +operation = LESS}]}, {key = "document.custom_metadata.year" +conditions = [{int_value = 2020, operation = GREATER_EQUAL}, +{int_value = 2010, operation = LESS}]}, {key = +"document.custom_metadata.genre" conditions = [{string_value += "drama", operation = EQUAL}, {string_value = "action", +operation = EQUAL}]}] + +Example query at chunk level for a numeric range of values: +(year > 2015 AND year <= 2020) + +``MetadataFilter`` object list: metadata_filters = [ {key = +"chunk.custom_metadata.year" conditions = [{int_value = +2015, operation = GREATER}]}, {key = +"chunk.custom_metadata.year" conditions = [{int_value = +2020, operation = LESS_EQUAL}]}] + +Note: "AND"s for the same key are only supported for numeric +values. String values only support "OR"s for the same key. +
+`results_count` + +`int` + +Optional. The maximum number of ``Chunk``\ s to return. The +service may return fewer ``Chunk``\ s. + +If unspecified, at most 10 ``Chunk``\ s will be returned. +The maximum specified result count is 100. +
+ + + diff --git a/docs/api/google/generativeai/protos/QueryCorpusResponse.md b/docs/api/google/generativeai/protos/QueryCorpusResponse.md new file mode 100644 index 000000000..cccf776a6 --- /dev/null +++ b/docs/api/google/generativeai/protos/QueryCorpusResponse.md @@ -0,0 +1,48 @@ +description: Response from QueryCorpus containing a list of relevant chunks. + +
+ + +
+ +# google.generativeai.protos.QueryCorpusResponse + + + + + + + + + +Response from ``QueryCorpus`` containing a list of relevant chunks. + + + + + + + + + + + + + + + +
+`relevant_chunks` + +`MutableSequence[google.ai.generativelanguage.RelevantChunk]` + +The relevant chunks. +
+ + + diff --git a/docs/api/google/generativeai/protos/QueryDocumentRequest.md b/docs/api/google/generativeai/protos/QueryDocumentRequest.md new file mode 100644 index 000000000..c31688fff --- /dev/null +++ b/docs/api/google/generativeai/protos/QueryDocumentRequest.md @@ -0,0 +1,109 @@ +description: Request for querying a Document. + +
+ + +
+ +# google.generativeai.protos.QueryDocumentRequest + + + + + + + + + +Request for querying a ``Document``. + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The name of the ``Document`` to query. Example: +``corpora/my-corpus-123/documents/the-doc-abc`` +
+`query` + +`str` + +Required. Query string to perform semantic +search. +
+`results_count` + +`int` + +Optional. The maximum number of ``Chunk``\ s to return. The +service may return fewer ``Chunk``\ s. + +If unspecified, at most 10 ``Chunk``\ s will be returned. +The maximum specified result count is 100. +
+`metadata_filters` +`MutableSequence[google.ai.generativelanguage.MetadataFilter]` +Optional. Filter for ``Chunk`` metadata. Each +``MetadataFilter`` object should correspond to a unique key. +Multiple ``MetadataFilter`` objects are joined by logical +"AND"s. + +Note: ``Document``-level filtering is not supported for this +request because a ``Document`` name is already specified. + +Example query: (year >= 2020 OR year < 2010) AND (genre = +drama OR genre = action) + +``MetadataFilter`` object list: metadata_filters = [ {key = +"chunk.custom_metadata.year" conditions = [{int_value = +2020, operation = GREATER_EQUAL}, {int_value = 2010, +operation = LESS}]}, {key = "chunk.custom_metadata.genre" +conditions = [{string_value = "drama", operation = EQUAL}, +{string_value = "action", operation = EQUAL}]}] + +Example query for a numeric range of values: (year > 2015 +AND year <= 2020) + +``MetadataFilter`` object list: metadata_filters = [ {key = +"chunk.custom_metadata.year" conditions = [{int_value = +2015, operation = GREATER}]}, {key = +"chunk.custom_metadata.year" conditions = [{int_value = +2020, operation = LESS_EQUAL}]}] + +Note: "AND"s for the same key are only supported for numeric +values. String values only support "OR"s for the same key. +
+ + + diff --git a/docs/api/google/generativeai/protos/QueryDocumentResponse.md b/docs/api/google/generativeai/protos/QueryDocumentResponse.md new file mode 100644 index 000000000..aabff9d01 --- /dev/null +++ b/docs/api/google/generativeai/protos/QueryDocumentResponse.md @@ -0,0 +1,48 @@ +description: Response from QueryDocument containing a list of relevant chunks. + +
+ + +
+ +# google.generativeai.protos.QueryDocumentResponse + + + + + + + + + +Response from ``QueryDocument`` containing a list of relevant chunks. + + + + + + + + + + + + + + + +
+`relevant_chunks` + +`MutableSequence[google.ai.generativelanguage.RelevantChunk]` + +The returned relevant chunks. +
+ + + diff --git a/docs/api/google/generativeai/protos/RelevantChunk.md b/docs/api/google/generativeai/protos/RelevantChunk.md new file mode 100644 index 000000000..c9a7f7036 --- /dev/null +++ b/docs/api/google/generativeai/protos/RelevantChunk.md @@ -0,0 +1,57 @@ +description: The information for a chunk relevant to a query. + +
+ + +
+ +# google.generativeai.protos.RelevantChunk + + + + + + + + + +The information for a chunk relevant to a query. + + + + + + + + + + + + + + + + + + +
+`chunk_relevance_score` + +`float` + +``Chunk`` relevance to the query. +
+`chunk` + +`google.ai.generativelanguage.Chunk` + +``Chunk`` associated with the query. +
+ + + diff --git a/docs/api/google/generativeai/protos/SafetyFeedback.md b/docs/api/google/generativeai/protos/SafetyFeedback.md new file mode 100644 index 000000000..6d7df4664 --- /dev/null +++ b/docs/api/google/generativeai/protos/SafetyFeedback.md @@ -0,0 +1,63 @@ +description: Safety feedback for an entire request. + +
+ + +
+ +# google.generativeai.protos.SafetyFeedback + + + + + + + + + +Safety feedback for an entire request. + + + +This field is populated if content in the input and/or response +is blocked due to safety settings. SafetyFeedback may not exist +for every HarmCategory. Each SafetyFeedback will return the +safety settings used by the request as well as the lowest +HarmProbability that should be allowed in order to return a +result. + + + + + + + + + + + + + + + +
+`rating` + +`google.ai.generativelanguage.SafetyRating` + +Safety rating evaluated from content. +
+`setting` + +`google.ai.generativelanguage.SafetySetting` + +Safety settings applied to the request. +
+ + + diff --git a/docs/api/google/generativeai/protos/SafetyRating.md b/docs/api/google/generativeai/protos/SafetyRating.md new file mode 100644 index 000000000..b53dda402 --- /dev/null +++ b/docs/api/google/generativeai/protos/SafetyRating.md @@ -0,0 +1,77 @@ +description: Safety rating for a piece of content. + +
+ + + +
+ +# google.generativeai.protos.SafetyRating + + + + + + + + + +Safety rating for a piece of content. + + + +The safety rating contains the category of harm and the harm +probability level in that category for a piece of content. +Content is classified for safety across a number of harm +categories and the probability of the harm classification is +included here. + + + + + + + + + + + + + + + + + + +
+`category` + +`google.ai.generativelanguage.HarmCategory` + +Required. The category for this rating. +
+`probability` + +`google.ai.generativelanguage.SafetyRating.HarmProbability` + +Required. The probability of harm for this +content. +
+`blocked` + +`bool` + +Was this content blocked because of this +rating? +
+ + + +## Child Classes +[`class HarmProbability`](../../../google/generativeai/types/HarmProbability.md) + diff --git a/docs/api/google/generativeai/protos/SafetySetting.md b/docs/api/google/generativeai/protos/SafetySetting.md new file mode 100644 index 000000000..5806267ee --- /dev/null +++ b/docs/api/google/generativeai/protos/SafetySetting.md @@ -0,0 +1,64 @@ +description: Safety setting, affecting the safety-blocking behavior. + +
+ + + +
+ +# google.generativeai.protos.SafetySetting + + + + + + + + + +Safety setting, affecting the safety-blocking behavior. + + + +Passing a safety setting for a category changes the allowed +probability that content is blocked. + + + + + + + + + + + + + + + +
+`category` + +`google.ai.generativelanguage.HarmCategory` + +Required. The category for this setting. +
+`threshold` + +`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold` + +Required. Controls the probability threshold +at which harm is blocked. +
+ + + +## Child Classes +[`class HarmBlockThreshold`](../../../google/generativeai/types/HarmBlockThreshold.md) + diff --git a/docs/api/google/generativeai/protos/Schema.md b/docs/api/google/generativeai/protos/Schema.md new file mode 100644 index 000000000..d23c8e7a1 --- /dev/null +++ b/docs/api/google/generativeai/protos/Schema.md @@ -0,0 +1,132 @@ +description: The Schema object allows the definition of input and output data types. + +
+ + + +
+ +# google.generativeai.protos.Schema + + + + + + + + + +The ``Schema`` object allows the definition of input and output data types. + + + These types can be objects, but also primitives and arrays. +Represents a select subset of an `OpenAPI 3.0 schema +object `__. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`type_` + +`google.ai.generativelanguage.Type` + +Required. Data type. +
+`format_` + +`str` + +Optional. The format of the data. This is +used only for primitive datatypes. Supported +formats: + + for NUMBER type: float, double + for INTEGER type: int32, int64 +
+`description` + +`str` + +Optional. A brief description of the +parameter. This could contain examples of use. +Parameter description may be formatted as +Markdown. +
+`nullable` + +`bool` + +Optional. Indicates if the value may be null. +
+`enum` +`MutableSequence[str]` +Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]} +
+`items` + +`google.ai.generativelanguage.Schema` + +Optional. Schema of the elements of +Type.ARRAY. + +
+`properties` + +`MutableMapping[str, google.ai.generativelanguage.Schema]` + +Optional. Properties of Type.OBJECT. +
+`required` + +`MutableSequence[str]` + +Optional. Required properties of Type.OBJECT. +
+ + + +## Child Classes +[`class PropertiesEntry`](../../../google/generativeai/protos/Schema/PropertiesEntry.md) + diff --git a/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md new file mode 100644 index 000000000..03df0d63c --- /dev/null +++ b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md @@ -0,0 +1,89 @@ +description: The abstract base class for a message. + +
+ + +
+ +# google.generativeai.protos.Schema.PropertiesEntry + + + + + + + + + +The abstract base class for a message. + + + + + + + + + + + + + + + + + + +
+mapping (Union[dict, ~.Message]): A dictionary or message to be +used to determine the values for this message. +
+`ignore_unknown_fields` +`Optional(bool)` +If True, do not raise errors for + unknown fields. Only applied if `mapping` is a mapping type or there + are keyword parameters. +
+`kwargs` + +`dict` + +Keys and values corresponding to the fields of the + message. +
+ + + + + + + + + + + + + + + + + +
+`key` + +`string key` +
+`value` + +`Schema value` +
+ + + diff --git a/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md new file mode 100644 index 000000000..1d930b0ec --- /dev/null +++ b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md @@ -0,0 +1,92 @@ +description: Configuration for retrieving grounding content from a Corpus or Document created using the Semantic Retriever API. + +
+ + +
+ +# google.generativeai.protos.SemanticRetrieverConfig + + + + + + + + + +Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`source` + +`str` + +Required. Name of the resource for retrieval, +e.g. corpora/123 or corpora/123/documents/abc. +
+`query` + +`google.ai.generativelanguage.Content` + +Required. Query to use for similarity matching ``Chunk``\ s +in the given resource. +
+`metadata_filters` + +`MutableSequence[google.ai.generativelanguage.MetadataFilter]` + +Optional. Filters for selecting ``Document``\ s and/or +``Chunk``\ s from the resource. +
+`max_chunks_count` + +`int` + +Optional. Maximum number of relevant ``Chunk``\ s to +retrieve. + +
+`minimum_relevance_score` + +`float` + +Optional. Minimum relevance score for retrieved relevant +``Chunk``\ s. + +
+ + + diff --git a/docs/api/google/generativeai/protos/StringList.md b/docs/api/google/generativeai/protos/StringList.md new file mode 100644 index 000000000..d36980c9d --- /dev/null +++ b/docs/api/google/generativeai/protos/StringList.md @@ -0,0 +1,48 @@ +description: User provided string values assigned to a single metadata key. + +
+ + +
+ +# google.generativeai.protos.StringList + + + + + + + + + +User provided string values assigned to a single metadata key. + + + + + + + + + + + + + + + +
+`values` + +`MutableSequence[str]` + +The string values of the metadata to store. +
+ + + diff --git a/docs/api/google/generativeai/protos/TaskType.md b/docs/api/google/generativeai/protos/TaskType.md new file mode 100644 index 000000000..159d41c7a --- /dev/null +++ b/docs/api/google/generativeai/protos/TaskType.md @@ -0,0 +1,771 @@ +description: Type of task for which the embedding will be used. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.TaskType + + + + + + + + + +Type of task for which the embedding will be used. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`TASK_TYPE_UNSPECIFIED` + +`0` + +Unset value, which will default to one of the +other enum values. +
+`RETRIEVAL_QUERY` + +`1` + +Specifies the given text is a query in a +search/retrieval setting. +
+`RETRIEVAL_DOCUMENT` + +`2` + +Specifies the given text is a document from +the corpus being searched. +
+`SEMANTIC_SIMILARITY` + +`3` + +Specifies the given text will be used for +STS. +
+`CLASSIFICATION` + +`4` + +Specifies that the given text will be +classified. +
+`CLUSTERING` + +`5` + +Specifies that the embeddings will be used +for clustering. +
+`QUESTION_ANSWERING` + +`6` + +Specifies that the given text will be used +for question answering. +
+`FACT_VERIFICATION` + +`7` + +Specifies that the given text will be used +for fact verification. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+CLASSIFICATION + +`` +
+CLUSTERING + +`` +
+FACT_VERIFICATION + +`` +
+QUESTION_ANSWERING + +`` +
+RETRIEVAL_DOCUMENT + +`` +
+RETRIEVAL_QUERY + +`` +
+SEMANTIC_SIMILARITY + +`` +
+TASK_TYPE_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/TextCompletion.md b/docs/api/google/generativeai/protos/TextCompletion.md new file mode 100644 index 000000000..0331b5fb4 --- /dev/null +++ b/docs/api/google/generativeai/protos/TextCompletion.md @@ -0,0 +1,74 @@ +description: Output text returned from a model. + +
+ + +
+ +# google.generativeai.protos.TextCompletion + + + + + + + + + +Output text returned from a model. + + + + + + + + + + + + + + + + + + + + + +
+`output` + +`str` + +Output only. The generated text returned from +the model. +
+`safety_ratings` + +`MutableSequence[google.ai.generativelanguage.SafetyRating]` + +Ratings for the safety of a response. + +There is at most one rating per category. +
+`citation_metadata` + +`google.ai.generativelanguage.CitationMetadata` + +Output only. Citation information for model-generated +``output`` in this ``TextCompletion``. + +This field may be populated with attribution information for +any text included in the ``output``. + +
+ + + diff --git a/docs/api/google/generativeai/protos/TextPrompt.md b/docs/api/google/generativeai/protos/TextPrompt.md new file mode 100644 index 000000000..5b51b8057 --- /dev/null +++ b/docs/api/google/generativeai/protos/TextPrompt.md @@ -0,0 +1,50 @@ +description: Text given to the model as a prompt. + +
+ + +
+ +# google.generativeai.protos.TextPrompt + + + + + + + + + +Text given to the model as a prompt. + + + +The Model will use this TextPrompt to Generate a text +completion. + + + + + + + + + + + + +
+`text` + +`str` + +Required. The prompt text. +
+ + + diff --git a/docs/api/google/generativeai/protos/Tool.md b/docs/api/google/generativeai/protos/Tool.md new file mode 100644 index 000000000..3e4c0bb48 --- /dev/null +++ b/docs/api/google/generativeai/protos/Tool.md @@ -0,0 +1,73 @@ +description: Tool details that the model may use to generate response. + +
+ + +
+ +# google.generativeai.protos.Tool + + + + + + + + + +Tool details that the model may use to generate response. + + + +A ``Tool`` is a piece of code that enables the system to interact +with external systems to perform an action, or set of actions, +outside of knowledge and scope of the model. + + + + + + + + + + + + + + + +
+`function_declarations` + +`MutableSequence[google.ai.generativelanguage.FunctionDeclaration]` + +Optional. A list of ``FunctionDeclarations`` available to +the model that can be used for function calling. + +The model or system does not execute the function. Instead +the defined function may be returned as a +[FunctionCall][content.part.function_call] with arguments to +the client side for execution. The model may decide to call +a subset of these functions by populating +[FunctionCall][content.part.function_call] in the response. +The next conversation turn may contain a +[FunctionResponse][content.part.function_response] with the +[content.role] "function" generation context for the next +model turn. +
+`code_execution` + +`google.ai.generativelanguage.CodeExecution` + +Optional. Enables the model to execute code +as part of generation. +
+ + + diff --git a/docs/api/google/generativeai/protos/ToolConfig.md b/docs/api/google/generativeai/protos/ToolConfig.md new file mode 100644 index 000000000..3c18c3a18 --- /dev/null +++ b/docs/api/google/generativeai/protos/ToolConfig.md @@ -0,0 +1,48 @@ +description: The Tool configuration containing parameters for specifying Tool use in the request. + +
+ + +
+ +# google.generativeai.protos.ToolConfig + + + + + + + + + +The Tool configuration containing parameters for specifying ``Tool`` use in the request. + + + + + + + + + + + + + + + +
+`function_calling_config` + +`google.ai.generativelanguage.FunctionCallingConfig` + +Optional. Function calling config. +
+ + + diff --git a/docs/api/google/generativeai/protos/TransferOwnershipRequest.md b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md new file mode 100644 index 000000000..ebf034974 --- /dev/null +++ b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md @@ -0,0 +1,61 @@ +description: Request to transfer the ownership of the tuned model. + +
+ + +
+ +# google.generativeai.protos.TransferOwnershipRequest + + + + + + + + + +Request to transfer the ownership of the tuned model. + + + + + + + + + + + + + + + + + + +
+`name` + +`str` + +Required. The resource name of the tuned model to transfer +ownership. + +Format: ``tunedModels/my-model-id`` +
+`email_address` + +`str` + +Required. The email address of the user to +whom the tuned model is being transferred to. +
+ + + diff --git a/docs/api/google/generativeai/protos/TransferOwnershipResponse.md b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md new file mode 100644 index 000000000..77a6706cc --- /dev/null +++ b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md @@ -0,0 +1,27 @@ +description: Response from TransferOwnership. + +
+ + +
+ +# google.generativeai.protos.TransferOwnershipResponse + + + + + + + + + +Response from ``TransferOwnership``. + + + + diff --git a/docs/api/google/generativeai/protos/TunedModel.md b/docs/api/google/generativeai/protos/TunedModel.md new file mode 100644 index 000000000..a9517d69b --- /dev/null +++ b/docs/api/google/generativeai/protos/TunedModel.md @@ -0,0 +1,197 @@ +description: A fine-tuned model created using ModelService.CreateTunedModel. + +
+ + + +
+ +# google.generativeai.protos.TunedModel + + + + + + + + + +A fine-tuned model created using ModelService.CreateTunedModel. + + + +This message has `oneof`_ fields (mutually exclusive fields). +For each oneof, at most one member field can be set at the same time. +Setting any member of the oneof automatically clears all other +members. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`tuned_model_source` + +`google.ai.generativelanguage.TunedModelSource` + +Optional. TunedModel to use as the starting +point for training the new model. + +This field is a member of `oneof`_ ``source_model``. +
+`base_model` + +`str` + +Immutable. The name of the ``Model`` to tune. Example: +``models/text-bison-001`` + +This field is a member of `oneof`_ ``source_model``. +
+`name` + +`str` + +Output only. The tuned model name. A unique name will be +generated on create. Example: ``tunedModels/az2mb0bpw6i`` If +display_name is set on create, the id portion of the name +will be set by concatenating the words of the display_name +with hyphens and adding a random portion for uniqueness. +Example: display_name = "Sentence Translator" name = +"tunedModels/sentence-translator-u3b7m". +
+`display_name` + +`str` + +Optional. The name to display for this model +in user interfaces. The display name must be up +to 40 characters including spaces. +
+`description` + +`str` + +Optional. A short description of this model. +
+`temperature` + +`float` + +Optional. Controls the randomness of the output. + +Values can range over ``[0.0,1.0]``, inclusive. A value +closer to ``1.0`` will produce responses that are more +varied, while a value closer to ``0.0`` will typically +result in less surprising responses from the model. + +This value specifies default to be the one used by the base +model while creating the model. + +
+`top_p` + +`float` + +Optional. For Nucleus sampling. + +Nucleus sampling considers the smallest set of tokens whose +probability sum is at least ``top_p``. + +This value specifies default to be the one used by the base +model while creating the model. + +
+`top_k` + +`int` + +Optional. For Top-k sampling. + +Top-k sampling considers the set of ``top_k`` most probable +tokens. This value specifies default to be used by the +backend while making the call to the model. + +This value specifies default to be the one used by the base +model while creating the model. + +
+`state` + +`google.ai.generativelanguage.TunedModel.State` + +Output only. The state of the tuned model. +
+`create_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp when this model +was created. +
+`update_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp when this model +was updated. +
+`tuning_task` + +`google.ai.generativelanguage.TuningTask` + +Required. The tuning task that creates the +tuned model. +
+ + + +## Child Classes +[`class State`](../../../google/generativeai/types/TunedModelState.md) + diff --git a/docs/api/google/generativeai/protos/TunedModelSource.md b/docs/api/google/generativeai/protos/TunedModelSource.md new file mode 100644 index 000000000..710c06e4b --- /dev/null +++ b/docs/api/google/generativeai/protos/TunedModelSource.md @@ -0,0 +1,61 @@ +description: Tuned model as a source for training a new model. + +
+ + +
+ +# google.generativeai.protos.TunedModelSource + + + + + + + + + +Tuned model as a source for training a new model. + + + + + + + + + + + + + + + + + + +
+`tuned_model` + +`str` + +Immutable. The name of the ``TunedModel`` to use as the +starting point for training the new model. Example: +``tunedModels/my-tuned-model`` +
+`base_model` + +`str` + +Output only. The name of the base ``Model`` this +``TunedModel`` was tuned from. Example: +``models/text-bison-001`` +
+ + + diff --git a/docs/api/google/generativeai/protos/TuningExample.md b/docs/api/google/generativeai/protos/TuningExample.md new file mode 100644 index 000000000..bcbdfc7e3 --- /dev/null +++ b/docs/api/google/generativeai/protos/TuningExample.md @@ -0,0 +1,59 @@ +description: A single example for tuning. + +
+ + +
+ +# google.generativeai.protos.TuningExample + + + + + + + + + +A single example for tuning. + + + + + + + + + + + + + + + + + + +
+`text_input` + +`str` + +Optional. Text model input. + +This field is a member of `oneof`_ ``model_input``. +
+`output` + +`str` + +Required. The expected model output. +
+ + + diff --git a/docs/api/google/generativeai/protos/TuningExamples.md b/docs/api/google/generativeai/protos/TuningExamples.md new file mode 100644 index 000000000..6a9e8670a --- /dev/null +++ b/docs/api/google/generativeai/protos/TuningExamples.md @@ -0,0 +1,50 @@ +description: A set of tuning examples. Can be training or validation data. + +
+ + +
+ +# google.generativeai.protos.TuningExamples + + + + + + + + + +A set of tuning examples. Can be training or validation data. + + + + + + + + + + + + + + + +
+`examples` + +`MutableSequence[google.ai.generativelanguage.TuningExample]` + +Required. The examples. Example input can be +for text or discuss, but all examples in a set +must be of the same type. +
+ + + diff --git a/docs/api/google/generativeai/protos/TuningSnapshot.md b/docs/api/google/generativeai/protos/TuningSnapshot.md new file mode 100644 index 000000000..1e088b8f4 --- /dev/null +++ b/docs/api/google/generativeai/protos/TuningSnapshot.md @@ -0,0 +1,77 @@ +description: Record for a single tuning step. + +
+ + +
+ +# google.generativeai.protos.TuningSnapshot + + + + + + + + + +Record for a single tuning step. + + + + + + + + + + + + + + + + + + + + + + + + +
+`step` + +`int` + +Output only. The tuning step. +
+`epoch` + +`int` + +Output only. The epoch this step was part of. +
+`mean_loss` + +`float` + +Output only. The mean loss of the training +examples for this step. +
+`compute_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp when this metric +was computed. +
+ + + diff --git a/docs/api/google/generativeai/protos/TuningTask.md b/docs/api/google/generativeai/protos/TuningTask.md new file mode 100644 index 000000000..8d4a15d9e --- /dev/null +++ b/docs/api/google/generativeai/protos/TuningTask.md @@ -0,0 +1,89 @@ +description: Tuning tasks that create tuned models. + +
+ + +
+ +# google.generativeai.protos.TuningTask + + + + + + + + + +Tuning tasks that create tuned models. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`start_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp when tuning this +model started. +
+`complete_time` + +`google.protobuf.timestamp_pb2.Timestamp` + +Output only. The timestamp when tuning this +model completed. +
+`snapshots` + +`MutableSequence[google.ai.generativelanguage.TuningSnapshot]` + +Output only. Metrics collected during tuning. +
+`training_data` + +`google.ai.generativelanguage.Dataset` + +Required. Input only. Immutable. The model +training data. +
+`hyperparameters` + +`google.ai.generativelanguage.Hyperparameters` + +Immutable. Hyperparameters controlling the +tuning process. If not provided, default values +will be used. +
+ + + diff --git a/docs/api/google/generativeai/protos/Type.md b/docs/api/google/generativeai/protos/Type.md new file mode 100644 index 000000000..8184c30fc --- /dev/null +++ b/docs/api/google/generativeai/protos/Type.md @@ -0,0 +1,746 @@ +description: Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.protos.Type + + + + + + + + + +Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`TYPE_UNSPECIFIED` + +`0` + +Not specified, should not be used. +
+`STRING` + +`1` + +String type. +
+`NUMBER` + +`2` + +Number type. +
+`INTEGER` + +`3` + +Integer type. +
+`BOOLEAN` + +`4` + +Boolean type. +
+`ARRAY` + +`5` + +Array type. +
+`OBJECT` + +`6` + +Object type. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ARRAY + +`` +
+BOOLEAN + +`` +
+INTEGER + +`` +
+NUMBER + +`` +
+OBJECT + +`` +
+STRING + +`` +
+TYPE_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md new file mode 100644 index 000000000..68899d656 --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md @@ -0,0 +1,57 @@ +description: Request to update CachedContent. + +
+ + +
+ +# google.generativeai.protos.UpdateCachedContentRequest + + + + + + + + + +Request to update CachedContent. + + + + + + + + + + + + + + + + + + +
+`cached_content` + +`google.ai.generativelanguage.CachedContent` + +Required. The content cache entry to update +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +The list of fields to update. +
+ + + diff --git a/docs/api/google/generativeai/protos/UpdateChunkRequest.md b/docs/api/google/generativeai/protos/UpdateChunkRequest.md new file mode 100644 index 000000000..df0ad7619 --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdateChunkRequest.md @@ -0,0 +1,58 @@ +description: Request to update a Chunk. + +
+ + +
+ +# google.generativeai.protos.UpdateChunkRequest + + + + + + + + + +Request to update a ``Chunk``. + + + + + + + + + + + + + + + + + + +
+`chunk` + +`google.ai.generativelanguage.Chunk` + +Required. The ``Chunk`` to update. +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +Required. The list of fields to update. Currently, this only +supports updating ``custom_metadata`` and ``data``. +
+ + + diff --git a/docs/api/google/generativeai/protos/UpdateCorpusRequest.md b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md new file mode 100644 index 000000000..c38d4fd20 --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md @@ -0,0 +1,58 @@ +description: Request to update a Corpus. + +
+ + +
+ +# google.generativeai.protos.UpdateCorpusRequest + + + + + + + + + +Request to update a ``Corpus``. + + + + + + + + + + + + + + + + + + +
+`corpus` + +`google.ai.generativelanguage.Corpus` + +Required. The ``Corpus`` to update. +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +Required. The list of fields to update. Currently, this only +supports updating ``display_name``. +
+ + + diff --git a/docs/api/google/generativeai/protos/UpdateDocumentRequest.md b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md new file mode 100644 index 000000000..d0b903835 --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md @@ -0,0 +1,58 @@ +description: Request to update a Document. + +
+ + +
+ +# google.generativeai.protos.UpdateDocumentRequest + + + + + + + + + +Request to update a ``Document``. + + + + + + + + + + + + + + + + + + +
+`document` + +`google.ai.generativelanguage.Document` + +Required. The ``Document`` to update. +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +Required. The list of fields to update. Currently, this only +supports updating ``display_name`` and ``custom_metadata``. +
+ + + diff --git a/docs/api/google/generativeai/protos/UpdatePermissionRequest.md b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md new file mode 100644 index 000000000..1a04f87ed --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md @@ -0,0 +1,62 @@ +description: Request to update the Permission. + +
+ + +
+ +# google.generativeai.protos.UpdatePermissionRequest + + + + + + + + + +Request to update the ``Permission``. + + + + + + + + + + + + + + + + + + +
+`permission` + +`google.ai.generativelanguage.Permission` + +Required. The permission to update. + +The permission's ``name`` field is used to identify the +permission to update. +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +Required. The list of fields to update. Accepted ones: + +- role (Permission.role field) +
+ + + diff --git a/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md new file mode 100644 index 000000000..76ab1573c --- /dev/null +++ b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md @@ -0,0 +1,57 @@ +description: Request to update a TunedModel. + +
+ + +
+ +# google.generativeai.protos.UpdateTunedModelRequest + + + + + + + + + +Request to update a TunedModel. + + + + + + + + + + + + + + + + + + +
+`tuned_model` + +`google.ai.generativelanguage.TunedModel` + +Required. The tuned model to update. +
+`update_mask` + +`google.protobuf.field_mask_pb2.FieldMask` + +Required. The list of fields to update. +
+ + + diff --git a/docs/api/google/generativeai/protos/VideoMetadata.md b/docs/api/google/generativeai/protos/VideoMetadata.md new file mode 100644 index 000000000..5f8d7c590 --- /dev/null +++ b/docs/api/google/generativeai/protos/VideoMetadata.md @@ -0,0 +1,48 @@ +description: Metadata for a video File. + +
+ + +
+ +# google.generativeai.protos.VideoMetadata + + + + + + + + + +Metadata for a video ``File``. + + + + + + + + + + + + + + + +
+`video_duration` + +`google.protobuf.duration_pb2.Duration` + +Duration of the video. +
+ + + diff --git a/docs/api/google/generativeai/types.md b/docs/api/google/generativeai/types.md new file mode 100644 index 000000000..9dacfa610 --- /dev/null +++ b/docs/api/google/generativeai/types.md @@ -0,0 +1,182 @@ +description: A collection of type definitions used throughout the library. + +
+ + + +
+ +# Module: google.generativeai.types + + + + + + + + + +A collection of type definitions used throughout the library. + + + +## Classes + +[`class AsyncGenerateContentResponse`](../../google/generativeai/types/AsyncGenerateContentResponse.md): This is the async version of `genai.GenerateContentResponse`. + +[`class AuthorError`](../../google/generativeai/types/AuthorError.md): Raised by the `chat` (or `reply`) functions when the author list can't be normalized. + +[`class BlobDict`](../../google/generativeai/types/BlobDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class BlockedPromptException`](../../google/generativeai/types/BlockedPromptException.md): Common base class for all non-exit exceptions. + +[`class BlockedReason`](../../google/generativeai/types/BlockedReason.md): A list of reasons why content may have been blocked. + +[`class BrokenResponseError`](../../google/generativeai/types/BrokenResponseError.md): Common base class for all non-exit exceptions. + +[`class CallableFunctionDeclaration`](../../google/generativeai/types/CallableFunctionDeclaration.md): An extension of `FunctionDeclaration` that can be built from a python function, and is callable. + +[`class ChatResponse`](../../google/generativeai/types/ChatResponse.md): A chat response from the model. + +[`class CitationMetadataDict`](../../google/generativeai/types/CitationMetadataDict.md): A collection of source attributions for a piece of content. + +[`class CitationSourceDict`](../../google/generativeai/types/CitationSourceDict.md): A citation to a source for a portion of a specific response. + +[`class Completion`](../../google/generativeai/types/Completion.md): The result returned by generativeai.generate_text. 
+ +[`class ContentDict`](../../google/generativeai/types/ContentDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class ContentFilterDict`](../../google/generativeai/types/ContentFilterDict.md): Content filtering metadata associated with processing a single request. + +[`class ExampleDict`](../../google/generativeai/types/ExampleDict.md): A dict representation of a protos.Example. + +[`class File`](../../google/generativeai/types/File.md) + +[`class FileDataDict`](../../google/generativeai/types/FileDataDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class FunctionDeclaration`](../../google/generativeai/types/FunctionDeclaration.md) + +[`class FunctionLibrary`](../../google/generativeai/types/FunctionLibrary.md): A container for a set of `Tool` objects, manages lookup and execution of their functions. + +[`class GenerateContentResponse`](../../google/generativeai/types/GenerateContentResponse.md): Instances of this class manage the response of the `generate_content` method. + +[`class GenerationConfig`](../../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content. 
+ +[`class GenerationConfigDict`](../../google/generativeai/types/GenerationConfigDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class HarmBlockThreshold`](../../google/generativeai/types/HarmBlockThreshold.md): Block at and beyond a specified harm probability. + +[`class HarmCategory`](../../google/generativeai/types/HarmCategory.md): Harm Categories supported by the gemini-family model + +[`class HarmProbability`](../../google/generativeai/types/HarmProbability.md): The probability that a piece of content is harmful. + +[`class IncompleteIterationError`](../../google/generativeai/types/IncompleteIterationError.md): Common base class for all non-exit exceptions. + +[`class MessageDict`](../../google/generativeai/types/MessageDict.md): A dict representation of a protos.Message. + +[`class MessagePromptDict`](../../google/generativeai/types/MessagePromptDict.md): A dict representation of a protos.MessagePrompt. + +[`class Model`](../../google/generativeai/types/Model.md): A dataclass representation of a protos.Model. + +[`class PartDict`](../../google/generativeai/types/PartDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class Permission`](../../google/generativeai/types/Permission.md): A permission to access a resource. 
+ +[`class Permissions`](../../google/generativeai/types/Permissions.md) + +[`class RequestOptions`](../../google/generativeai/types/RequestOptions.md): Request options + +[`class ResponseDict`](../../google/generativeai/types/ResponseDict.md): A dict representation of a protos.GenerateMessageResponse. + +[`class SafetyFeedbackDict`](../../google/generativeai/types/SafetyFeedbackDict.md): Safety feedback for an entire request. + +[`class SafetyRatingDict`](../../google/generativeai/types/SafetyRatingDict.md): Safety rating for a piece of content. + +[`class SafetySettingDict`](../../google/generativeai/types/SafetySettingDict.md): Safety setting, affecting the safety-blocking behavior. + +[`class Status`](../../google/generativeai/types/Status.md): A ProtocolMessage + +[`class StopCandidateException`](../../google/generativeai/types/StopCandidateException.md): Common base class for all non-exit exceptions. + +[`class Tool`](../../google/generativeai/types/Tool.md): A wrapper for protos.Tool, Contains a collection of related `FunctionDeclaration` objects. + +[`class ToolDict`](../../google/generativeai/types/ToolDict.md): dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +[`class TunedModel`](../../google/generativeai/types/TunedModel.md): A dataclass representation of a protos.TunedModel. + +[`class TunedModelState`](../../google/generativeai/types/TunedModelState.md): The state of the tuned model. + +## Functions + +[`TypedDict(...)`](../../google/generativeai/types/TypedDict.md): A simple typed namespace. At runtime it is equivalent to a plain dict. 
+ +[`get_default_file_client(...)`](../../google/generativeai/types/get_default_file_client.md) + +[`to_file_data(...)`](../../google/generativeai/types/to_file_data.md) + +## Type Aliases + +[`AnyModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md) + +[`BaseModelNameOptions`](../../google/generativeai/types/BaseModelNameOptions.md) + +[`BlobType`](../../google/generativeai/types/BlobType.md) + +[`ContentType`](../../google/generativeai/types/ContentType.md) + +[`ContentsType`](../../google/generativeai/types/ContentsType.md) + +[`ExampleOptions`](../../google/generativeai/types/ExampleOptions.md) + +[`ExamplesOptions`](../../google/generativeai/types/ExamplesOptions.md) + +[`FileDataType`](../../google/generativeai/types/FileDataType.md) + +[`FunctionDeclarationType`](../../google/generativeai/types/FunctionDeclarationType.md) + +[`FunctionLibraryType`](../../google/generativeai/types/FunctionLibraryType.md) + +[`GenerationConfigType`](../../google/generativeai/types/GenerationConfigType.md) + +[`MessageOptions`](../../google/generativeai/types/MessageOptions.md) + +[`MessagePromptOptions`](../../google/generativeai/types/MessagePromptOptions.md) + +[`MessagesOptions`](../../google/generativeai/types/MessagesOptions.md) + +[`ModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md) + +[`ModelsIterable`](../../google/generativeai/types/ModelsIterable.md) + +[`PartType`](../../google/generativeai/types/PartType.md) + +[`RequestOptionsType`](../../google/generativeai/types/RequestOptionsType.md) + +[`StrictContentType`](../../google/generativeai/types/StrictContentType.md) + +[`ToolsType`](../../google/generativeai/types/ToolsType.md) + +[`TunedModelNameOptions`](../../google/generativeai/types/TunedModelNameOptions.md) + + + + + + + + + + + + +
+annotations + +Instance of `__future__._Feature` +
+ diff --git a/docs/api/google/generativeai/types/AnyModelNameOptions.md b/docs/api/google/generativeai/types/AnyModelNameOptions.md new file mode 100644 index 000000000..c2090da48 --- /dev/null +++ b/docs/api/google/generativeai/types/AnyModelNameOptions.md @@ -0,0 +1,27 @@ +
+ + +
+ +# google.generativeai.types.AnyModelNameOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md new file mode 100644 index 000000000..0e302ea41 --- /dev/null +++ b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md @@ -0,0 +1,152 @@ +description: This is the async version of genai.GenerateContentResponse. + +
+ + + + + + + +
+ +# google.generativeai.types.AsyncGenerateContentResponse + + + + + + + + + +This is the async version of `genai.GenerateContentResponse`. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`candidates` + +The list of candidate responses. +
+`parts` + +A quick accessor equivalent to `self.candidates[0].content.parts` +
+`prompt_feedback` + + +
+`text` + +A quick accessor equivalent to `self.candidates[0].content.parts[0].text` +
+`usage_metadata` + + +
+ + + +## Methods + +

from_aiterator

+ +View source + + + + + + +

from_response

+ +View source + + + + + + +

resolve

+ +View source + + + + + + +

to_dict

+ +View source + + + +Returns the result as a JSON-compatible dict. + +Note: This doesn't capture the iterator state when streaming, it only captures the accumulated +`GenerateContentResponse` fields. + +``` +>>> import json +>>> response = model.generate_content('Hello?') +>>> json.dumps(response.to_dict()) +``` + + + diff --git a/docs/api/google/generativeai/types/AuthorError.md b/docs/api/google/generativeai/types/AuthorError.md new file mode 100644 index 000000000..0220cd6f5 --- /dev/null +++ b/docs/api/google/generativeai/types/AuthorError.md @@ -0,0 +1,27 @@ +description: Raised by the chat (or reply) functions when the author list can't be normalized. + +
+ + +
+ +# google.generativeai.types.AuthorError + + + + + + + + + +Raised by the `chat` (or `reply`) functions when the author list can't be normalized. + + + + diff --git a/docs/api/google/generativeai/types/BaseModelNameOptions.md b/docs/api/google/generativeai/types/BaseModelNameOptions.md new file mode 100644 index 000000000..25a142c3e --- /dev/null +++ b/docs/api/google/generativeai/types/BaseModelNameOptions.md @@ -0,0 +1,25 @@ +
+ + +
+ +# google.generativeai.types.BaseModelNameOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/BlobDict.md b/docs/api/google/generativeai/types/BlobDict.md new file mode 100644 index 000000000..b13257433 --- /dev/null +++ b/docs/api/google/generativeai/types/BlobDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.BlobDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/BlobType.md b/docs/api/google/generativeai/types/BlobType.md new file mode 100644 index 000000000..82039d71c --- /dev/null +++ b/docs/api/google/generativeai/types/BlobType.md @@ -0,0 +1,26 @@ +
+ + +
+ +# google.generativeai.types.BlobType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/BlockedPromptException.md b/docs/api/google/generativeai/types/BlockedPromptException.md new file mode 100644 index 000000000..93dc09556 --- /dev/null +++ b/docs/api/google/generativeai/types/BlockedPromptException.md @@ -0,0 +1,27 @@ +description: Common base class for all non-exit exceptions. + +
+ + +
+ +# google.generativeai.types.BlockedPromptException + + + + + + + + + +Common base class for all non-exit exceptions. + + + + diff --git a/docs/api/google/generativeai/types/BlockedReason.md b/docs/api/google/generativeai/types/BlockedReason.md new file mode 100644 index 000000000..4508388eb --- /dev/null +++ b/docs/api/google/generativeai/types/BlockedReason.md @@ -0,0 +1,687 @@ +description: A list of reasons why content may have been blocked. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.BlockedReason + + + + + + + + + +A list of reasons why content may have been blocked. + + + + + + + + + + + + + + + + + + + + + + + + + +
+`BLOCKED_REASON_UNSPECIFIED` + +`0` + +A blocked reason was not specified. +
+`SAFETY` + +`1` + +Content was blocked by safety settings. +
+`OTHER` + +`2` + +Content was blocked, but the reason is +uncategorized. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<<value. + + +

__lt__

+ + + +Return self<value. + + +

__mod__

+ + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<<self. + + +

__rmod__

+ + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + +
+BLOCKED_REASON_UNSPECIFIED + +`` +
+OTHER + +`` +
+SAFETY + +`` +
+ diff --git a/docs/api/google/generativeai/types/BrokenResponseError.md b/docs/api/google/generativeai/types/BrokenResponseError.md new file mode 100644 index 000000000..073ade6c0 --- /dev/null +++ b/docs/api/google/generativeai/types/BrokenResponseError.md @@ -0,0 +1,27 @@ +description: Common base class for all non-exit exceptions. + +
+ + +
+ +# google.generativeai.types.BrokenResponseError + + + + + + + + + +Common base class for all non-exit exceptions. + + + + diff --git a/docs/api/google/generativeai/types/CallableFunctionDeclaration.md b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md new file mode 100644 index 000000000..5a8e8eebc --- /dev/null +++ b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md @@ -0,0 +1,144 @@ +description: An extension of FunctionDeclaration that can be built from a python function, and is callable. + +
+ + + + + + + +
+ +# google.generativeai.types.CallableFunctionDeclaration + + + + + + + + + +An extension of `FunctionDeclaration` that can be built from a python function, and is callable. + +Inherits From: [`FunctionDeclaration`](../../../google/generativeai/types/FunctionDeclaration.md) + + + + + + + +Note: The python function must have type annotations. + + + + + + + + + + + + + + + + + + +
+`description` + + +
+`name` + + +
+`parameters` + + +
+ + + +## Methods + +

from_function

+ +View source + + + +Builds a `CallableFunctionDeclaration` from a python function. + +The function should have type annotations. + +This method is able to generate the schema for arguments annotated with types: + +`AllowedTypes = float | int | str | list[AllowedTypes] | dict` + +This method does not yet build a schema for `TypedDict`, that would allow you to specify the dictionary +contents. But you can build these manually. + +

from_proto

+ +View source + + + + + + +

to_proto

+ +View source + + + + + + +

__call__

+ +View source + + + +Call self as a function. + + + + diff --git a/docs/api/google/generativeai/types/ChatResponse.md b/docs/api/google/generativeai/types/ChatResponse.md new file mode 100644 index 000000000..5b8453b7c --- /dev/null +++ b/docs/api/google/generativeai/types/ChatResponse.md @@ -0,0 +1,223 @@ +description: A chat response from the model. + +
+ + + + + + + +
+ +# google.generativeai.types.ChatResponse + + + + + + + + + +A chat response from the model. + + + +* Use `response.last` (settable) for easy access to the text of the last response. + (`messages[-1]['content']`) +* Use `response.messages` to access the message history (including `.last`). +* Use `response.candidates` to access all the responses generated by the model. + +Other attributes are just saved from the arguments to `genai.chat`, so you +can easily continue a conversation: + +``` +import google.generativeai as genai + +genai.configure(api_key=os.environ['GOOGLE_API_KEY']) + +response = genai.chat(messages=["Hello."]) +print(response.last) # 'Hello! What can I help you with?' +response.reply("Can you tell me a joke?") +``` + +See `genai.chat` for more details. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`candidates` + +A list of candidate responses from the model. + +The top candidate is appended to the `messages` field. + +This list will contain a *maximum* of `candidate_count` candidates. +It may contain fewer (duplicates are dropped), it will contain at least one. + +Note: The `temperature` field affects the variability of the responses. Low +temperatures will return few candidates. Setting `temperature=0` is deterministic, +so it will only ever return one candidate. +
+`filters` + +This indicates which `types.SafetyCategory`(s) blocked a +candidate from this response, the lowest types.HarmProbability +that triggered a block, and the `types.HarmThreshold` setting for that category. +This indicates the smallest change to the `types.SafetySettings` that would be +necessary to unblock at least 1 response. + +The blocking is configured by the `types.SafetySettings` in the request (or the +default `types.SafetySettings` of the API). +
+`messages` + +A snapshot of the conversation history sorted chronologically. +
+`model` + +The model name. +
+`context` + +Text that should be provided to the model first, to ground the response. +
+`examples` + +Examples of what the model should generate. +
+`temperature` + +Controls the randomness of the output. Must be positive. +
+`candidate_count` + +The **maximum** number of generated response messages to return. +
+`top_k` + +The maximum number of tokens to consider when sampling. +
+`top_p` + +The maximum cumulative probability of tokens to consider when sampling. +
+`last` + +A settable property that provides simple access to the last response string + + +A shortcut for `response.messages[-1]['content']`. +
+ + + +## Methods + +

reply

+ +View source + + + +Add a message to the conversation, and get the model's response. + + +

to_dict

+ +View source + + + + + + +

__eq__

+ + + +Return self==value. + + + + + + + + + + + + + + + + + + +
+top_k + +`None` +
+top_p + +`None` +
+ diff --git a/docs/api/google/generativeai/types/CitationMetadataDict.md b/docs/api/google/generativeai/types/CitationMetadataDict.md new file mode 100644 index 000000000..fc61779d2 --- /dev/null +++ b/docs/api/google/generativeai/types/CitationMetadataDict.md @@ -0,0 +1,48 @@ +description: A collection of source attributions for a piece of content. + +
+ + +
+ +# google.generativeai.types.CitationMetadataDict + + + + + + + + + +A collection of source attributions for a piece of content. + + + + + + + + + + + + + + + +
+`citation_sources` + +`MutableSequence[google.ai.generativelanguage.CitationSource]` + +Citations to sources for a specific response. +
+ + + diff --git a/docs/api/google/generativeai/types/CitationSourceDict.md b/docs/api/google/generativeai/types/CitationSourceDict.md new file mode 100644 index 000000000..2df1ee443 --- /dev/null +++ b/docs/api/google/generativeai/types/CitationSourceDict.md @@ -0,0 +1,84 @@ +description: A citation to a source for a portion of a specific response. + +
+ + +
+ +# google.generativeai.types.CitationSourceDict + + + + + + + + + +A citation to a source for a portion of a specific response. + + + + + + + + + + + + + + + + + + + + + + + + +
+`start_index` + +`int` + +Optional. Start of segment of the response +that is attributed to this source. + +Index indicates the start of the segment, +measured in bytes. +
+`end_index` + +`int` + +Optional. End of the attributed segment, +exclusive. +
+`uri` + +`str` + +Optional. URI that is attributed as a source +for a portion of the text. +
+`license_` + +`str` + +Optional. License for the GitHub project that +is attributed as a source for segment. + +License info is required for code citations. +
+ + + diff --git a/docs/api/google/generativeai/types/Completion.md b/docs/api/google/generativeai/types/Completion.md new file mode 100644 index 000000000..9b7ac8daf --- /dev/null +++ b/docs/api/google/generativeai/types/Completion.md @@ -0,0 +1,97 @@ +description: The result returned by generativeai.generate_text. + +
+ + + + +
+ +# google.generativeai.types.Completion + + + + + + + + + +The result returned by generativeai.generate_text. + + + +Use GenerateTextResponse.candidates to access all the completions generated by the model. + + + + + + + + + + + + + + + + + + + + + +
+`candidates` + +A list of candidate text completions generated by the model. +
+`result` + +The output of the first candidate. +
+`filters` + +Indicates the reasons why content may have been blocked. +See types.BlockedReason. +
+`safety_feedback` + +Indicates which safety settings blocked content in this result. +
+ + + +## Methods + +

to_dict

+ +View source + + + + + + +

__eq__

+ + + +Return self==value. + + + + diff --git a/docs/api/google/generativeai/types/ContentDict.md b/docs/api/google/generativeai/types/ContentDict.md new file mode 100644 index 000000000..3334b93a0 --- /dev/null +++ b/docs/api/google/generativeai/types/ContentDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.ContentDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/ContentFilterDict.md b/docs/api/google/generativeai/types/ContentFilterDict.md new file mode 100644 index 000000000..a15074d73 --- /dev/null +++ b/docs/api/google/generativeai/types/ContentFilterDict.md @@ -0,0 +1,62 @@ +description: Content filtering metadata associated with processing a single request. + +
+ + +
+ +# google.generativeai.types.ContentFilterDict + + + + + + + + + +Content filtering metadata associated with processing a single request. + + +ContentFilter contains a reason and an optional supporting +string. The reason may be unspecified. + + + + + + + + + + + + + + + + + +
+`reason` + +`google.ai.generativelanguage.ContentFilter.BlockedReason` + +The reason content was blocked during request +processing. +
+`message` + +`str` + +A string that describes the filtering +behavior in more detail. +
+ + + diff --git a/docs/api/google/generativeai/types/ContentType.md b/docs/api/google/generativeai/types/ContentType.md new file mode 100644 index 000000000..775f39380 --- /dev/null +++ b/docs/api/google/generativeai/types/ContentType.md @@ -0,0 +1,38 @@ +
+ + +
+ +# google.generativeai.types.ContentType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/ContentsType.md b/docs/api/google/generativeai/types/ContentsType.md new file mode 100644 index 000000000..79a3feef8 --- /dev/null +++ b/docs/api/google/generativeai/types/ContentsType.md @@ -0,0 +1,40 @@ +
+ + +
+ +# google.generativeai.types.ContentsType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/ExampleDict.md b/docs/api/google/generativeai/types/ExampleDict.md new file mode 100644 index 000000000..a2fae446a --- /dev/null +++ b/docs/api/google/generativeai/types/ExampleDict.md @@ -0,0 +1,27 @@ +description: A dict representation of a protos.Example. + +
+ + +
+ +# google.generativeai.types.ExampleDict + + + + + + + + + +A dict representation of a protos.Example. + + + + diff --git a/docs/api/google/generativeai/types/ExampleOptions.md b/docs/api/google/generativeai/types/ExampleOptions.md new file mode 100644 index 000000000..6aa3bc965 --- /dev/null +++ b/docs/api/google/generativeai/types/ExampleOptions.md @@ -0,0 +1,26 @@ +
+ + +
+ +# google.generativeai.types.ExampleOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/ExamplesOptions.md b/docs/api/google/generativeai/types/ExamplesOptions.md new file mode 100644 index 000000000..71389d120 --- /dev/null +++ b/docs/api/google/generativeai/types/ExamplesOptions.md @@ -0,0 +1,27 @@ +
+ + +
+ +# google.generativeai.types.ExamplesOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/File.md b/docs/api/google/generativeai/types/File.md new file mode 100644 index 000000000..52f5d6996 --- /dev/null +++ b/docs/api/google/generativeai/types/File.md @@ -0,0 +1,170 @@ +
+ + + + + + +
+ +# google.generativeai.types.File + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`create_time` + + +
+`display_name` + + +
+`error` + + +
+`expiration_time` + + +
+`mime_type` + + +
+`name` + + +
+`sha256_hash` + + +
+`size_bytes` + + +
+`state` + + +
+`update_time` + + +
+`uri` + + +
+`video_metadata` + + +
+ + + +## Methods + +

delete

+ +View source + + + + + + +

to_dict

+ +View source + + + + + + +

to_proto

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/FileDataDict.md b/docs/api/google/generativeai/types/FileDataDict.md new file mode 100644 index 000000000..455cafc5d --- /dev/null +++ b/docs/api/google/generativeai/types/FileDataDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.FileDataDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/FileDataType.md b/docs/api/google/generativeai/types/FileDataType.md new file mode 100644 index 000000000..20d2cefd0 --- /dev/null +++ b/docs/api/google/generativeai/types/FileDataType.md @@ -0,0 +1,26 @@ +
+ + +
+ +# google.generativeai.types.FileDataType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/FunctionDeclaration.md b/docs/api/google/generativeai/types/FunctionDeclaration.md new file mode 100644 index 000000000..672fd711d --- /dev/null +++ b/docs/api/google/generativeai/types/FunctionDeclaration.md @@ -0,0 +1,121 @@ +
+ + + + + + +
+ +# google.generativeai.types.FunctionDeclaration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`description` + + +
+`name` + + +
+`parameters` + + +
+ + + +## Methods + +

from_function

+ +View source + + + +Builds a `CallableFunctionDeclaration` from a python function. + +The function should have type annotations. + +This method is able to generate the schema for arguments annotated with types: + +`AllowedTypes = float | int | str | list[AllowedTypes] | dict` + +This method does not yet build a schema for `TypedDict`, that would allow you to specify the dictionary +contents. But you can build these manually. + +

from_proto

+ +View source + + + + + + +

to_proto

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/FunctionDeclarationType.md b/docs/api/google/generativeai/types/FunctionDeclarationType.md new file mode 100644 index 000000000..4d9e8a85a --- /dev/null +++ b/docs/api/google/generativeai/types/FunctionDeclarationType.md @@ -0,0 +1,26 @@ +
+ + +
+ +# google.generativeai.types.FunctionDeclarationType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/FunctionLibrary.md b/docs/api/google/generativeai/types/FunctionLibrary.md new file mode 100644 index 000000000..e8d386181 --- /dev/null +++ b/docs/api/google/generativeai/types/FunctionLibrary.md @@ -0,0 +1,80 @@ +description: A container for a set of Tool objects, manages lookup and execution of their functions. + +
+ + + + + + +
+ +# google.generativeai.types.FunctionLibrary + + + + + + + + + +A container for a set of `Tool` objects, manages lookup and execution of their functions. + + + + + + + + +## Methods + +

to_proto

+ +View source + + + + + + +

__call__

+ +View source + + + +Call self as a function. + + +

__getitem__

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/FunctionLibraryType.md b/docs/api/google/generativeai/types/FunctionLibraryType.md new file mode 100644 index 000000000..2b49e5ad1 --- /dev/null +++ b/docs/api/google/generativeai/types/FunctionLibraryType.md @@ -0,0 +1,32 @@ +
+ + +
+ +# google.generativeai.types.FunctionLibraryType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/GenerateContentResponse.md b/docs/api/google/generativeai/types/GenerateContentResponse.md new file mode 100644 index 000000000..5f1c6c118 --- /dev/null +++ b/docs/api/google/generativeai/types/GenerateContentResponse.md @@ -0,0 +1,185 @@ +description: Instances of this class manage the response of the generate_content method. + +
+ + + + + + + + +
+ +# google.generativeai.types.GenerateContentResponse + + + + + + + + + +Instances of this class manage the response of the `generate_content` method. + + + + + + + +These are returned by GenerativeModel.generate_content and ChatSession.send_message. +This object is based on the low level protos.GenerateContentResponse class which just has `prompt_feedback` +and `candidates` attributes. This class adds several quick accessors for common use cases. + +The same object type is returned for both `stream=True/False`. + +### Streaming + +When you pass `stream=True` to GenerativeModel.generate_content or ChatSession.send_message, +iterate over this object to receive chunks of the response: + +``` +response = model.generate_content(..., stream=True): +for chunk in response: + print(chunk.text) +``` + +GenerateContentResponse.prompt_feedback is available immediately but +GenerateContentResponse.candidates, and all the attributes derived from them (`.text`, `.parts`), +are only available after the iteration is complete. + + + + + + + + + + + + + + + + + + + + + + + + +
+`candidates` + +The list of candidate responses. +
+`parts` + +A quick accessor equivalent to `self.candidates[0].content.parts` +
+`prompt_feedback` + + +
+`text` + +A quick accessor equivalent to `self.candidates[0].content.parts[0].text` +
+`usage_metadata` + + +
+ + + +## Methods + +

from_iterator

+ +View source + + + + + + +

from_response

+ +View source + + + + + + +

resolve

+ +View source + + + + + + +

to_dict

+ +View source + + + +Returns the result as a JSON-compatible dict. + +Note: This doesn't capture the iterator state when streaming, it only captures the accumulated +`GenerateContentResponse` fields. + +``` +>>> import json +>>> response = model.generate_content('Hello?') +>>> json.dumps(response.to_dict()) +``` + +

__iter__

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/GenerationConfig.md b/docs/api/google/generativeai/types/GenerationConfig.md new file mode 100644 index 000000000..0a51b7978 --- /dev/null +++ b/docs/api/google/generativeai/types/GenerationConfig.md @@ -0,0 +1,255 @@ +description: A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content. + +
+ + + + + + + + + + + + +
+ +# google.generativeai.types.GenerationConfig + + + + + + + + + +A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`candidate_count` + + Number of generated responses to return. +
+`stop_sequences` + + The set of character sequences (up +to 5) that will stop output generation. If +specified, the API will stop at the first +appearance of a stop sequence. The stop sequence +will not be included as part of the response. +
+`max_output_tokens` + + The maximum number of tokens to include in a +candidate. + +If unset, this will default to output_token_limit specified +in the model's specification. +
+`temperature` + + Controls the randomness of the output. Note: The +default value varies by model, see the Model.temperature +attribute of the `Model` returned by the `genai.get_model` +function. + +Values can range from [0.0,1.0], inclusive. A value closer +to 1.0 will produce responses that are more varied and +creative, while a value closer to 0.0 will typically result +in more straightforward responses from the model. +
+`top_p` + + Optional. The maximum cumulative probability of tokens to +consider when sampling. + +The model uses combined Top-k and nucleus sampling. + +Tokens are sorted based on their assigned probabilities so +that only the most likely tokens are considered. Top-k +sampling directly limits the maximum number of tokens to +consider, while Nucleus sampling limits number of tokens +based on the cumulative probability. + +Note: The default value varies by model, see the +Model.top_p attribute of the `Model` returned by the +`genai.get_model` function. +
+`top_k` + +`int` + +Optional. The maximum number of tokens to consider when +sampling. + +The model uses combined Top-k and nucleus sampling. + +Top-k sampling considers the set of `top_k` most probable +tokens. Defaults to 40. + +Note: The default value varies by model, see the +Model.top_k attribute of the `Model` returned by the +`genai.get_model` function. +
+`response_mime_type` + + Optional. Output response mimetype of the generated candidate text. + +Supported mimetype: + `text/plain`: (default) Text output. + `application/json`: JSON response in the candidates. +
+`response_schema` + + Optional. Specifies the format of the JSON requested if response_mime_type is +`application/json`. +
+ + + +## Methods + +

__eq__

+ + + +Return self==value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+candidate_count + +`None` +
+max_output_tokens + +`None` +
+response_mime_type + +`None` +
+response_schema + +`None` +
+stop_sequences + +`None` +
+temperature + +`None` +
+top_k + +`None` +
+top_p + +`None` +
+ diff --git a/docs/api/google/generativeai/types/GenerationConfigDict.md b/docs/api/google/generativeai/types/GenerationConfigDict.md new file mode 100644 index 000000000..9cd508a9a --- /dev/null +++ b/docs/api/google/generativeai/types/GenerationConfigDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.GenerationConfigDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/GenerationConfigType.md b/docs/api/google/generativeai/types/GenerationConfigType.md new file mode 100644 index 000000000..fcd42bdf9 --- /dev/null +++ b/docs/api/google/generativeai/types/GenerationConfigType.md @@ -0,0 +1,25 @@ +
+ + +
+ +# google.generativeai.types.GenerationConfigType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/HarmBlockThreshold.md b/docs/api/google/generativeai/types/HarmBlockThreshold.md new file mode 100644 index 000000000..bea677e9b --- /dev/null +++ b/docs/api/google/generativeai/types/HarmBlockThreshold.md @@ -0,0 +1,722 @@ +description: Block at and beyond a specified harm probability. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.HarmBlockThreshold + + + + + + + + + +Block at and beyond a specified harm probability. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`HARM_BLOCK_THRESHOLD_UNSPECIFIED` + +`0` + +Threshold is unspecified. +
+`BLOCK_LOW_AND_ABOVE` + +`1` + +Content with NEGLIGIBLE will be allowed. +
+`BLOCK_MEDIUM_AND_ABOVE` + +`2` + +Content with NEGLIGIBLE and LOW will be +allowed. +
+`BLOCK_ONLY_HIGH` + +`3` + +Content with NEGLIGIBLE, LOW, and MEDIUM will +be allowed. +
+`BLOCK_NONE` + +`4` + +All content will be allowed. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+BLOCK_LOW_AND_ABOVE + +`` +
+BLOCK_MEDIUM_AND_ABOVE + +`` +
+BLOCK_NONE + +`` +
+BLOCK_ONLY_HIGH + +`` +
+HARM_BLOCK_THRESHOLD_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/types/HarmCategory.md b/docs/api/google/generativeai/types/HarmCategory.md new file mode 100644 index 000000000..a22f1f060 --- /dev/null +++ b/docs/api/google/generativeai/types/HarmCategory.md @@ -0,0 +1,657 @@ +description: Harm Categories supported by the gemini-family model + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.HarmCategory + + + + + + + + + +Harm Categories supported by the gemini-family model + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+HARM_CATEGORY_DANGEROUS_CONTENT + +`` +
+HARM_CATEGORY_HARASSMENT + +`` +
+HARM_CATEGORY_HATE_SPEECH + +`` +
+HARM_CATEGORY_SEXUALLY_EXPLICIT + +`` +
+HARM_CATEGORY_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/types/HarmProbability.md b/docs/api/google/generativeai/types/HarmProbability.md new file mode 100644 index 000000000..d383b3530 --- /dev/null +++ b/docs/api/google/generativeai/types/HarmProbability.md @@ -0,0 +1,724 @@ +description: The probability that a piece of content is harmful. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.HarmProbability + + + + + + + + + +The probability that a piece of content is harmful. + + + + + + + + + +The classification system gives the probability of the content +being unsafe. This does not indicate the severity of harm for a +piece of content. + + + + + + + + + + + + + + + + + + + + + + +
+`HARM_PROBABILITY_UNSPECIFIED` + +`0` + +Probability is unspecified. +
+`NEGLIGIBLE` + +`1` + +Content has a negligible chance of being +unsafe. +
+`LOW` + +`2` + +Content has a low chance of being unsafe. +
+`MEDIUM` + +`3` + +Content has a medium chance of being unsafe. +
+`HIGH` + +`4` + +Content has a high chance of being unsafe. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+HARM_PROBABILITY_UNSPECIFIED + +`` +
+HIGH + +`` +
+LOW + +`` +
+MEDIUM + +`` +
+NEGLIGIBLE + +`` +
+ diff --git a/docs/api/google/generativeai/types/IncompleteIterationError.md b/docs/api/google/generativeai/types/IncompleteIterationError.md new file mode 100644 index 000000000..03eaced0f --- /dev/null +++ b/docs/api/google/generativeai/types/IncompleteIterationError.md @@ -0,0 +1,27 @@ +description: Common base class for all non-exit exceptions. + +
+ + +
+ +# google.generativeai.types.IncompleteIterationError + + + + + + + + + +Common base class for all non-exit exceptions. + + + + diff --git a/docs/api/google/generativeai/types/MessageDict.md b/docs/api/google/generativeai/types/MessageDict.md new file mode 100644 index 000000000..2b9d0d5ba --- /dev/null +++ b/docs/api/google/generativeai/types/MessageDict.md @@ -0,0 +1,27 @@ +description: A dict representation of a protos.Message. + +
+ + +
+ +# google.generativeai.types.MessageDict + + + + + + + + + +A dict representation of a protos.Message. + + + + diff --git a/docs/api/google/generativeai/types/MessageOptions.md b/docs/api/google/generativeai/types/MessageOptions.md new file mode 100644 index 000000000..9e7ad68e7 --- /dev/null +++ b/docs/api/google/generativeai/types/MessageOptions.md @@ -0,0 +1,25 @@ +
+ + +
+ +# google.generativeai.types.MessageOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/MessagePromptDict.md b/docs/api/google/generativeai/types/MessagePromptDict.md new file mode 100644 index 000000000..51abe73fe --- /dev/null +++ b/docs/api/google/generativeai/types/MessagePromptDict.md @@ -0,0 +1,27 @@ +description: A dict representation of a protos.MessagePrompt. + +
+ + +
+ +# google.generativeai.types.MessagePromptDict + + + + + + + + + +A dict representation of a protos.MessagePrompt. + + + + diff --git a/docs/api/google/generativeai/types/MessagePromptOptions.md b/docs/api/google/generativeai/types/MessagePromptOptions.md new file mode 100644 index 000000000..54a0dc48c --- /dev/null +++ b/docs/api/google/generativeai/types/MessagePromptOptions.md @@ -0,0 +1,27 @@ +
+ + +
+ +# google.generativeai.types.MessagePromptOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/MessagesOptions.md b/docs/api/google/generativeai/types/MessagesOptions.md new file mode 100644 index 000000000..77c310024 --- /dev/null +++ b/docs/api/google/generativeai/types/MessagesOptions.md @@ -0,0 +1,26 @@ +
+ + +
+ +# google.generativeai.types.MessagesOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/Model.md b/docs/api/google/generativeai/types/Model.md new file mode 100644 index 000000000..e3c122a06 --- /dev/null +++ b/docs/api/google/generativeai/types/Model.md @@ -0,0 +1,205 @@ +description: A dataclass representation of a protos.Model. + +
+ + + + + + + + +
+ +# google.generativeai.types.Model + + + + + + + + + +A dataclass representation of a protos.Model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming +convention of: "{base_model_id}-{version}". For example: `models/chat-bison-001`. +
+`base_model_id` + +The base name of the model. For example: `chat-bison`. +
+`version` + + The major version number of the model. For example: `001`. +
+`display_name` + +The human-readable name of the model. E.g. `"Chat Bison"`. The name can be up +to 128 characters long and can consist of any UTF-8 characters. +
+`description` + +A short description of the model. +
+`input_token_limit` + +Maximum number of input tokens allowed for this model. +
+`output_token_limit` + +Maximum number of output tokens available for this model. +
+`supported_generation_methods` + +lists which methods are supported by the model. The method +names are defined as Pascal case strings, such as `generateMessage` which correspond to +API methods. +
+`temperature` + +Dataclass field +
+`max_temperature` + +Dataclass field +
+`top_p` + +Dataclass field +
+`top_k` + +Dataclass field +
+ + + +## Methods + +

__eq__

+ + + +Return self==value. + + + + + + + + + + + + + + + + + + + + + + + + +
+max_temperature + +`None` +
+temperature + +`None` +
+top_k + +`None` +
+top_p + +`None` +
+ diff --git a/docs/api/google/generativeai/types/ModelsIterable.md b/docs/api/google/generativeai/types/ModelsIterable.md new file mode 100644 index 000000000..e5e4f8774 --- /dev/null +++ b/docs/api/google/generativeai/types/ModelsIterable.md @@ -0,0 +1,23 @@ +
+ + +
+ +# google.generativeai.types.ModelsIterable + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/PartDict.md b/docs/api/google/generativeai/types/PartDict.md new file mode 100644 index 000000000..ccf40e29e --- /dev/null +++ b/docs/api/google/generativeai/types/PartDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.PartDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/PartType.md b/docs/api/google/generativeai/types/PartType.md new file mode 100644 index 000000000..0490bae6b --- /dev/null +++ b/docs/api/google/generativeai/types/PartType.md @@ -0,0 +1,35 @@ +
+ + +
+ +# google.generativeai.types.PartType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/Permission.md b/docs/api/google/generativeai/types/Permission.md new file mode 100644 index 000000000..22697e497 --- /dev/null +++ b/docs/api/google/generativeai/types/Permission.md @@ -0,0 +1,274 @@ +description: A permission to access a resource. + +
+ + + + + + + + + + + + +
+ +# google.generativeai.types.Permission + + + + + + + + + +A permission to access a resource. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`name` + +Dataclass field +
+`role` + +Dataclass field +
+`grantee_type` + +Dataclass field +
+`email_address` + +Dataclass field +
+ + + +## Methods + +

delete

+ +View source + + + +Delete permission (self). + + +

delete_async

+ +View source + + + +This is the async version of Permission.delete. + + +

get

+ +View source + + + +Get information about a specific permission. + + + + + + + + + + + +
Args
+`name` + +The name of the permission to get. +
+ + + + + + + + + + + +
Returns
+Requested permission as an instance of `Permission`. +
+ + + +

get_async

+ +View source + + + +This is the async version of Permission.get. + + +

to_dict

+ +View source + + + + + + +

update

+ +View source + + + +Update a list of fields for a specified permission. + + + + + + + + + + + +
Args
+`updates` + +The list of fields to update. +Currently only `role` is supported as an update path. +
+ + + + + + + + + + + +
Returns
+`Permission` object with specified updates. +
+ + + +

update_async

+ +View source + + + +This is the async version of Permission.update. + + +

__eq__

+ + + +Return self==value. + + + + + + + + + + + + + + + +
+email_address + +`None` +
+ diff --git a/docs/api/google/generativeai/types/Permissions.md b/docs/api/google/generativeai/types/Permissions.md new file mode 100644 index 000000000..3f889fd17 --- /dev/null +++ b/docs/api/google/generativeai/types/Permissions.md @@ -0,0 +1,386 @@ +
+ + + + + + + + + + + + +
+ +# google.generativeai.types.Permissions + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`parent` + + +
+ + + +## Methods + +

create

+ +View source + + + +Create a new permission on a resource (self). + + + + + + + + + + + + + + + + + + + + +
Args
+`parent` + +The resource name of the parent resource in which the permission will be listed. +
+`role` + +role that will be granted by the permission. +
+`grantee_type` + +The type of the grantee for the permission. +
+`email_address` + +The email address of the grantee. +
+ + + + + + + + + + + +
Returns
+`Permission` object with specified parent, role, grantee type, and email address. +
+ + + + + + + + + + + + + + + +
Raises
+`ValueError` + +When email_address is specified and grantee_type is set to EVERYONE. +
+`ValueError` + +When email_address is not specified and grantee_type is not set to EVERYONE. +
+ + + +

create_async

+ +View source + + + +This is the async version of `PermissionAdapter.create_permission`. + + +

get

+ +View source + + + +Get information about a specific permission. + + + + + + + + + + + +
Args
+`name` + +The name of the permission to get. +
+ + + + + + + + + + + +
Returns
+Requested permission as an instance of `Permission`. +
+ + + +

get_async

+ +View source + + + +Get information about a specific permission. + + + + + + + + + + + +
Args
+`name` + +The name of the permission to get. +
+ + + + + + + + + + + +
Returns
+Requested permission as an instance of `Permission`. +
+ + + +

list

+ +View source + + + +List `Permission`s enforced on a resource (self). + + + + + + + + + + + + + + +
Args
+`parent` + +The resource name of the parent resource in which the permission will be listed. +
+`page_size` + +The maximum number of permissions to return (per page). The service may return fewer permissions. +
+ + + + + + + + + + + +
Returns
+Paginated list of `Permission` objects. +
+ + + +

list_async

+ +View source + + + +This is the async version of `PermissionAdapter.list_permissions`. + + +

transfer_ownership

+ +View source + + + +Transfer ownership of a resource (self) to a new owner. + + + + + + + + + + + + + + +
Args
+`name` + +Name of the resource to transfer ownership. +
+`email_address` + +Email address of the new owner. +
+ + + +

transfer_ownership_async

+ +View source + + + +This is the async version of `PermissionAdapter.transfer_ownership`. + + +

__iter__

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/RequestOptions.md b/docs/api/google/generativeai/types/RequestOptions.md new file mode 100644 index 000000000..1e3cfd324 --- /dev/null +++ b/docs/api/google/generativeai/types/RequestOptions.md @@ -0,0 +1,209 @@ +description: Request options + +
+ + + + + + + + + + + + +
+ +# google.generativeai.types.RequestOptions + + + + + + + + + +Request options + + + + + + + + +``` +>>> import google.generativeai as genai +>>> from google.generativeai.types import RequestOptions +>>> from google.api_core import retry +>>> +>>> model = genai.GenerativeModel() +>>> response = model.generate_content('Hello', +... request_options=RequestOptions( +... retry=retry.Retry(initial=10, multiplier=2, maximum=60, timeout=300))) +>>> response = model.generate_content('Hello', +... request_options=RequestOptions(timeout=600))) +``` + + + + + + + + + + + + + +
+`retry` + +Refer to [retry docs](https://googleapis.dev/python/google-api-core/latest/retry.html) for details. +
+`timeout` + +In seconds (or provide a [TimeToDeadlineTimeout](https://googleapis.dev/python/google-api-core/latest/timeout.html) object). +
+ + + + + + + + + + + + + + + + + +
+`retry` + +Dataclass field +
+`timeout` + +Dataclass field +
+ + + +## Methods + +

get

+ + + +D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. + + +

items

+ + + +D.items() -> a set-like object providing a view on D's items + + +

keys

+ + + +D.keys() -> a set-like object providing a view on D's keys + + +

values

+ + + +D.values() -> an object providing a view on D's values + + +

__contains__

+ + + + + + +

__eq__

+ + + +Return self==value. + + +

__getitem__

+ +View source + + + + + + +

__iter__

+ +View source + + + + + + +

__len__

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/RequestOptionsType.md b/docs/api/google/generativeai/types/RequestOptionsType.md new file mode 100644 index 000000000..aeee187f1 --- /dev/null +++ b/docs/api/google/generativeai/types/RequestOptionsType.md @@ -0,0 +1,24 @@ +
+ + +
+ +# google.generativeai.types.RequestOptionsType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/ResponseDict.md b/docs/api/google/generativeai/types/ResponseDict.md new file mode 100644 index 000000000..2a5119a22 --- /dev/null +++ b/docs/api/google/generativeai/types/ResponseDict.md @@ -0,0 +1,27 @@ +description: A dict representation of a protos.GenerateMessageResponse. + +
+ + +
+ +# google.generativeai.types.ResponseDict + + + + + + + + + +A dict representation of a protos.GenerateMessageResponse. + + + + diff --git a/docs/api/google/generativeai/types/SafetyFeedbackDict.md b/docs/api/google/generativeai/types/SafetyFeedbackDict.md new file mode 100644 index 000000000..121d94e3a --- /dev/null +++ b/docs/api/google/generativeai/types/SafetyFeedbackDict.md @@ -0,0 +1,63 @@ +description: Safety feedback for an entire request. + +
+ + +
+ +# google.generativeai.types.SafetyFeedbackDict + + + + + + + + + +Safety feedback for an entire request. + + + +This field is populated if content in the input and/or response +is blocked due to safety settings. SafetyFeedback may not exist +for every HarmCategory. Each SafetyFeedback will return the +safety settings used by the request as well as the lowest +HarmProbability that should be allowed in order to return a +result. + + + + + + + + + + + + + + + +
+`rating` + +`google.ai.generativelanguage.SafetyRating` + +Safety rating evaluated from content. +
+`setting` + +`google.ai.generativelanguage.SafetySetting` + +Safety settings applied to the request. +
+ + + diff --git a/docs/api/google/generativeai/types/SafetyRatingDict.md b/docs/api/google/generativeai/types/SafetyRatingDict.md new file mode 100644 index 000000000..bc10071b5 --- /dev/null +++ b/docs/api/google/generativeai/types/SafetyRatingDict.md @@ -0,0 +1,73 @@ +description: Safety rating for a piece of content. + +
+ + +
+ +# google.generativeai.types.SafetyRatingDict + + + + + + + + + +Safety rating for a piece of content. + + + +The safety rating contains the category of harm and the harm +probability level in that category for a piece of content. +Content is classified for safety across a number of harm +categories and the probability of the harm classification is +included here. + + + + + + + + + + + + + + + + + + +
+`category` + +`google.ai.generativelanguage.HarmCategory` + +Required. The category for this rating. +
+`probability` + +`google.ai.generativelanguage.SafetyRating.HarmProbability` + +Required. The probability of harm for this +content. +
+`blocked` + +`bool` + +Was this content blocked because of this +rating? +
+ + + diff --git a/docs/api/google/generativeai/types/SafetySettingDict.md b/docs/api/google/generativeai/types/SafetySettingDict.md new file mode 100644 index 000000000..fd15a443b --- /dev/null +++ b/docs/api/google/generativeai/types/SafetySettingDict.md @@ -0,0 +1,60 @@ +description: Safety setting, affecting the safety-blocking behavior. + +
+ + +
+ +# google.generativeai.types.SafetySettingDict + + + + + + + + + +Safety setting, affecting the safety-blocking behavior. + + + +Passing a safety setting for a category changes the allowed +probability that content is blocked. + + + + + + + + + + + + + + + +
+`category` + +`google.ai.generativelanguage.HarmCategory` + +Required. The category for this setting. +
+`threshold` + +`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold` + +Required. Controls the probability threshold +at which harm is blocked. +
+ + + diff --git a/docs/api/google/generativeai/types/Status.md b/docs/api/google/generativeai/types/Status.md new file mode 100644 index 000000000..c306a9ded --- /dev/null +++ b/docs/api/google/generativeai/types/Status.md @@ -0,0 +1,55 @@ +description: A ProtocolMessage + +
+ + +
+ +# google.generativeai.types.Status + + + + + + + + + +A ProtocolMessage + + + + + + + + + + + + + + + + + + + + + +
+`code` + +`int32 code` +
+`details` + +`repeated Any details` +
+`message` + +`string message` +
+ + + diff --git a/docs/api/google/generativeai/types/StopCandidateException.md b/docs/api/google/generativeai/types/StopCandidateException.md new file mode 100644 index 000000000..56484a013 --- /dev/null +++ b/docs/api/google/generativeai/types/StopCandidateException.md @@ -0,0 +1,27 @@ +description: Common base class for all non-exit exceptions. + +
+ + +
+ +# google.generativeai.types.StopCandidateException + + + + + + + + + +Common base class for all non-exit exceptions. + + + + diff --git a/docs/api/google/generativeai/types/StrictContentType.md b/docs/api/google/generativeai/types/StrictContentType.md new file mode 100644 index 000000000..cfd497595 --- /dev/null +++ b/docs/api/google/generativeai/types/StrictContentType.md @@ -0,0 +1,24 @@ +
+ + +
+ +# google.generativeai.types.StrictContentType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/Tool.md b/docs/api/google/generativeai/types/Tool.md new file mode 100644 index 000000000..ac49e31da --- /dev/null +++ b/docs/api/google/generativeai/types/Tool.md @@ -0,0 +1,107 @@ +description: A wrapper for protos.Tool, Contains a collection of related FunctionDeclaration objects. + +
+ + + + + + +
+ +# google.generativeai.types.Tool + + + + + + + + + +A wrapper for protos.Tool, Contains a collection of related `FunctionDeclaration` objects. + + + + + + + + + + + + + + + + + + + + + + +
+`code_execution` + + +
+`function_declarations` + + +
+ + + +## Methods + +

to_proto

+ +View source + + + + + + +

__call__

+ +View source + + + +Call self as a function. + + +

__getitem__

+ +View source + + + + + + + + diff --git a/docs/api/google/generativeai/types/ToolDict.md b/docs/api/google/generativeai/types/ToolDict.md new file mode 100644 index 000000000..01814ceb3 --- /dev/null +++ b/docs/api/google/generativeai/types/ToolDict.md @@ -0,0 +1,27 @@ +description: dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + +
+ + +
+ +# google.generativeai.types.ToolDict + + + + + + + + + +dict() -> new empty dictionary dict(mapping) -> new dictionary initialized from a mapping object's (key, value) pairs dict(iterable) -> new dictionary initialized as if via: d = {} for k, v in iterable: d[k] = v dict(**kwargs) -> new dictionary initialized with the name=value pairs in the keyword argument list. + + + For example: dict(one=1, two=2) + diff --git a/docs/api/google/generativeai/types/ToolsType.md b/docs/api/google/generativeai/types/ToolsType.md new file mode 100644 index 000000000..9b9430a65 --- /dev/null +++ b/docs/api/google/generativeai/types/ToolsType.md @@ -0,0 +1,31 @@ +
+ + +
+ +# google.generativeai.types.ToolsType + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/TunedModel.md b/docs/api/google/generativeai/types/TunedModel.md new file mode 100644 index 000000000..dfe35d4b6 --- /dev/null +++ b/docs/api/google/generativeai/types/TunedModel.md @@ -0,0 +1,272 @@ +description: A dataclass representation of a protos.TunedModel. + +
+ + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.TunedModel + + + + + + + + + +A dataclass representation of a protos.TunedModel. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`permissions` + + +
+`name` + +Dataclass field +
+`source_model` + +Dataclass field +
+`base_model` + +Dataclass field +
+`display_name` + +Dataclass field +
+`description` + +Dataclass field +
+`temperature` + +Dataclass field +
+`top_p` + +Dataclass field +
+`top_k` + +Dataclass field +
+`state` + +Dataclass field +
+`create_time` + +Dataclass field +
+`update_time` + +Dataclass field +
+`tuning_task` + +Dataclass field +
+ + + +## Methods + +

__eq__

+ + + +Return self==value. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+base_model + +`None` +
+create_time + +`None` +
+description + +`''` +
+display_name + +`''` +
+name + +`None` +
+source_model + +`None` +
+state + +`` +
+temperature + +`None` +
+top_k + +`None` +
+top_p + +`None` +
+tuning_task + +`None` +
+update_time + +`None` +
+ diff --git a/docs/api/google/generativeai/types/TunedModelNameOptions.md b/docs/api/google/generativeai/types/TunedModelNameOptions.md new file mode 100644 index 000000000..3f52c4a87 --- /dev/null +++ b/docs/api/google/generativeai/types/TunedModelNameOptions.md @@ -0,0 +1,25 @@ +
+ + +
+ +# google.generativeai.types.TunedModelNameOptions + + +This symbol is a **type alias**. + + + +#### Source: + + + + + + diff --git a/docs/api/google/generativeai/types/TunedModelState.md b/docs/api/google/generativeai/types/TunedModelState.md new file mode 100644 index 000000000..26b0d4bfe --- /dev/null +++ b/docs/api/google/generativeai/types/TunedModelState.md @@ -0,0 +1,703 @@ +description: The state of the tuned model. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +# google.generativeai.types.TunedModelState + + + + + + + + + +The state of the tuned model. + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`STATE_UNSPECIFIED` + +`0` + +The default value. This value is unused. +
+`CREATING` + +`1` + +The model is being created. +
+`ACTIVE` + +`2` + +The model is ready to be used. +
+`FAILED` + +`3` + +The model failed to be created. +
+ + + + + + + + + + + + + + + + + + + + + + + +
+`denominator` + +the denominator of a rational number in lowest terms +
+`imag` + +the imaginary part of a complex number +
+`numerator` + +the numerator of a rational number in lowest terms +
+`real` + +the real part of a complex number +
+ + + +## Methods + +

as_integer_ratio

+ + + +Return integer ratio. + +Return a pair of integers, whose ratio is exactly equal to the original int +and with a positive denominator. + +``` +>>> (10).as_integer_ratio() +(10, 1) +>>> (-10).as_integer_ratio() +(-10, 1) +>>> (0).as_integer_ratio() +(0, 1) +``` + +

bit_count

+ + + +Number of ones in the binary representation of the absolute value of self. + +Also known as the population count. + +``` +>>> bin(13) +'0b1101' +>>> (13).bit_count() +3 +``` + +

bit_length

+ + + +Number of bits necessary to represent self in binary. + +``` +>>> bin(37) +'0b100101' +>>> (37).bit_length() +6 +``` + +

conjugate

+ + + +Returns self, the complex conjugate of any int. + + +

from_bytes

+ + + +Return the integer represented by the given array of bytes. + +bytes + Holds the array of bytes to convert. The argument must either + support the buffer protocol or be an iterable object producing bytes. + Bytes and bytearray are examples of built-in objects that support the + buffer protocol. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Indicates whether two's complement is used to represent the integer. + +

to_bytes

+ + + +Return an array of bytes representing an integer. + +length + Length of bytes object to use. An OverflowError is raised if the + integer is not representable with the given number of bytes. Default + is length 1. +byteorder + The byte order used to represent the integer. If byteorder is 'big', + the most significant byte is at the beginning of the byte array. If + byteorder is 'little', the most significant byte is at the end of the + byte array. To request the native byte order of the host system, use + `sys.byteorder' as the byte order value. Default is to use 'big'. +signed + Determines whether two's complement is used to represent the integer. + If signed is False and a negative integer is given, an OverflowError + is raised. + +

__abs__

+ + + +abs(self) + + +

__add__

+ + + +Return self+value. + + +

__and__

+ + + +Return self&value. + + +

__bool__

+ + + +True if self else False + + +

__eq__

+ + + +Return self==value. + + +

__floordiv__

+ + + +Return self//value. + + +

__ge__

+ + + +Return self>=value. + + +

__gt__

+ + + +Return self>value. + + +

__invert__

+ + + +~self + + +

__le__

+ + + +Return self<=value. + + +

__lshift__

+ + + +Return self<__lt__ + + + +Return self__mod__ + + + +Return self%value. + + +

__mul__

+ + + +Return self*value. + + +

__ne__

+ + + +Return self!=value. + + +

__neg__

+ + + +-self + + +

__or__

+ + + +Return self|value. + + +

__pos__

+ + + ++self + + +

__pow__

+ + + +Return pow(self, value, mod). + + +

__radd__

+ + + +Return value+self. + + +

__rand__

+ + + +Return value&self. + + +

__rfloordiv__

+ + + +Return value//self. + + +

__rlshift__

+ + + +Return value<__rmod__ + + + +Return value%self. + + +

__rmul__

+ + + +Return value*self. + + +

__ror__

+ + + +Return value|self. + + +

__rpow__

+ + + +Return pow(value, self, mod). + + +

__rrshift__

+ + + +Return value>>self. + + +

__rshift__

+ + + +Return self>>value. + + +

__rsub__

+ + + +Return value-self. + + +

__rtruediv__

+ + + +Return value/self. + + +

__rxor__

+ + + +Return value^self. + + +

__sub__

+ + + +Return self-value. + + +

__truediv__

+ + + +Return self/value. + + +

__xor__

+ + + +Return self^value. + + + + + + + + + + + + + + + + + + + + + + + + +
+ACTIVE + +`` +
+CREATING + +`` +
+FAILED + +`` +
+STATE_UNSPECIFIED + +`` +
+ diff --git a/docs/api/google/generativeai/types/TypedDict.md b/docs/api/google/generativeai/types/TypedDict.md new file mode 100644 index 000000000..00a68a8e3 --- /dev/null +++ b/docs/api/google/generativeai/types/TypedDict.md @@ -0,0 +1,73 @@ +description: A simple typed namespace. At runtime it is equivalent to a plain dict. + +
+ + +
+ +# google.generativeai.types.TypedDict + + + + + + + + + +A simple typed namespace. At runtime it is equivalent to a plain dict. + + + + + + + + +TypedDict creates a dictionary type such that a type checker will expect all +instances to have a certain set of keys, where each key is +associated with a value of a consistent type. This expectation +is not checked at runtime. + +Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + +The type info can be accessed via the Point2D.__annotations__ dict, and +the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. +TypedDict supports an additional equivalent form:: + + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + +By default, all keys must be present in a TypedDict. It is possible +to override this by specifying totality:: + + class Point2D(TypedDict, total=False): + x: int + y: int + +This means that a Point2D TypedDict can have any of the keys omitted. A type +checker is only expected to support a literal False or True as the value of +the total argument. True is the default, and makes all items defined in the +class body be required. + +The Required and NotRequired special forms can also be used to mark +individual keys as being required or not required:: + + class Point2D(TypedDict): + x: int # the "x" key must always be present (Required is the default) + y: NotRequired[int] # the "y" key can be omitted + +See PEP 655 for more details on Required and NotRequired. \ No newline at end of file diff --git a/docs/api/google/generativeai/types/get_default_file_client.md b/docs/api/google/generativeai/types/get_default_file_client.md new file mode 100644 index 000000000..03aff0033 --- /dev/null +++ b/docs/api/google/generativeai/types/get_default_file_client.md @@ -0,0 +1,30 @@ +
+ + +
+ +# google.generativeai.types.get_default_file_client + + + + + + + + + + + + + + + + + diff --git a/docs/api/google/generativeai/types/to_file_data.md b/docs/api/google/generativeai/types/to_file_data.md new file mode 100644 index 000000000..5e263eb3b --- /dev/null +++ b/docs/api/google/generativeai/types/to_file_data.md @@ -0,0 +1,32 @@ +
+ + +
+ +# google.generativeai.types.to_file_data + + + + + + + + + + + + + + + + + diff --git a/docs/api/google/generativeai/update_tuned_model.md b/docs/api/google/generativeai/update_tuned_model.md new file mode 100644 index 000000000..b1f1dc9eb --- /dev/null +++ b/docs/api/google/generativeai/update_tuned_model.md @@ -0,0 +1,38 @@ +description: Calls the API to push updates to a specified tuned model where only certain attributes are updatable. + +
+ + +
+ +# google.generativeai.update_tuned_model + + + + + + + + + +Calls the API to push updates to a specified tuned model where only certain attributes are updatable. + + + + + + + diff --git a/docs/api/google/generativeai/upload_file.md b/docs/api/google/generativeai/upload_file.md new file mode 100644 index 000000000..7a18ba41a --- /dev/null +++ b/docs/api/google/generativeai/upload_file.md @@ -0,0 +1,105 @@ +description: Calls the API to upload a file using a supported file service. + +
+ + +
+ +# google.generativeai.upload_file + + + + + + + + + +Calls the API to upload a file using a supported file service. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+`path` + +The path to the file to be uploaded. +
+`mime_type` + +The MIME type of the file. If not provided, it will be +inferred from the file extension. +
+`name` + +The name of the file in the destination (e.g., 'files/sample-image'). +If not provided, a system generated ID will be created. +
+`display_name` + +Optional display name of the file. +
+`resumable` + +Whether to use the resumable upload protocol. By default, this is enabled. +See details at +https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.MediaFileUpload-class.html#resumable +
+ + + + + + + + + + + + +
+`file_types.File` + +The response of the uploaded file. +
+ From 0f8f139e6a77b95aad58f001202cf82cdc230bf5 Mon Sep 17 00:00:00 2001 From: Guillaume Vernade Date: Mon, 22 Jul 2024 18:26:05 +0000 Subject: [PATCH 17/90] Using the `GEMINI_API_KEY` by default instead of the `GOOGLE_API_KEY` one (#418) * Using the GEMINI_API_KEY by default instead of the GOOGLE_API_KEY one The Google API key can be misleading since it's not really an overall key for all of Google API and only for the Gemini ones. * Formatting * Update google/generativeai/client.py * revert elif --------- Co-authored-by: Mark Daoust --- README.md | 2 +- google/generativeai/client.py | 12 +++++++++--- google/generativeai/types/discuss_types.py | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c0200f5b2..99d387bd7 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbo import google.generativeai as genai import os -genai.configure(api_key=os.environ["GOOGLE_API_KEY"]) +genai.configure(api_key=os.environ["GEMINI_API_KEY"]) ``` 3. Create a model and run a prompt. diff --git a/google/generativeai/client.py b/google/generativeai/client.py index 5d7b6996b..7e2193890 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -132,7 +132,8 @@ def configure( """Initializes default client configurations using specified parameters or environment variables. If no API key has been provided (either directly, or on `client_options`) and the - `GOOGLE_API_KEY` environment variable is set, it will be used as the API key. + `GEMINI_API_KEY` environment variable is set, it will be used as the API key. If not, + if the `GOOGLE_API_KEY` environement variable is set, it will be used as the API key. Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in `google.ai.generativelanguage` for details on the other arguments. 
@@ -141,8 +142,8 @@ def configure( transport: A string, one of: [`rest`, `grpc`, `grpc_asyncio`]. api_key: The API-Key to use when creating the default clients (each service uses a separate client). This is a shortcut for `client_options={"api_key": api_key}`. - If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be - used. + If omitted, and the `GEMINI_API_KEY` or the `GOOGLE_API_KEY` environment variable + are set, they will be used in this order of priority. default_metadata: Default (key, value) metadata pairs to send with every request. when using `transport="rest"` these are sent as HTTP headers. """ @@ -162,6 +163,11 @@ def configure( if api_key is None: # If no key is provided explicitly, attempt to load one from the # environment. + api_key = os.getenv("GEMINI_API_KEY") + + if api_key is None: + # If the GEMINI_API_KEY doesn't exist, attempt to load the + # GOOGLE_API_KEY from the environment. api_key = os.getenv("GOOGLE_API_KEY") client_options.api_key = api_key diff --git a/google/generativeai/types/discuss_types.py b/google/generativeai/types/discuss_types.py index a538da65c..05ad262f3 100644 --- a/google/generativeai/types/discuss_types.py +++ b/google/generativeai/types/discuss_types.py @@ -121,7 +121,7 @@ class ChatResponse(abc.ABC): ``` import google.generativeai as genai - genai.configure(api_key=os.environ['GOOGLE_API_KEY']) + genai.configure(api_key=os.environ['GEMINI_API_KEY']) response = genai.chat(messages=["Hello."]) print(response.last) # 'Hello! What can I help you with?' 
From 5b31be7ff74aa0e6eb41a13d619ad8f116a4e1fd Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Mon, 22 Jul 2024 17:18:05 -0700 Subject: [PATCH 18/90] Add REST embeddings & system_instruction sample (#429) * Add REST embeddings sample * Add system_instruction shell script as well * Update region tags --- samples/rest/embeddings.sh | 32 ++++++++++++++++++++++++++++++ samples/rest/system_instruction.sh | 13 ++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 samples/rest/embeddings.sh create mode 100644 samples/rest/system_instruction.sh diff --git a/samples/rest/embeddings.sh b/samples/rest/embeddings.sh new file mode 100644 index 000000000..26fa11d44 --- /dev/null +++ b/samples/rest/embeddings.sh @@ -0,0 +1,32 @@ +set -eu + +echo "[START embed_content]" +# [START embed_content] +curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{"model": "models/text-embedding-004", + "content": { + "parts":[{ + "text": "Hello world"}]}, }' 2> /dev/null | head +# [END embed_content] + +echo "[START batch_embed_contents]" +# [START batch_embed_contents] +curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:batchEmbedContents?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{"requests": [{ + "model": "models/text-embedding-004", + "content": { + "parts":[{ + "text": "What is the meaning of life?"}]}, }, + { + "model": "models/text-embedding-004", + "content": { + "parts":[{ + "text": "How much wood would a woodchuck chuck?"}]}, }, + { + "model": "models/text-embedding-004", + "content": { + "parts":[{ + "text": "How does the brain work?"}]}, }, ]}' 2> /dev/null | grep -C 5 values +# [END batch_embed_contents] \ No newline at end of file diff --git a/samples/rest/system_instruction.sh b/samples/rest/system_instruction.sh new file mode 100644 index 000000000..6a32c8f58 --- /dev/null +++ 
b/samples/rest/system_instruction.sh @@ -0,0 +1,13 @@ +set -eu + +echo "[START system_instruction]" +# [START system_instruction] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{ "system_instruction": { + "parts": + { "text": "You are a cat. Your name is Neko."}}, + "contents": { + "parts": { + "text": "Hello there"}}}' +# [END system_instruction] \ No newline at end of file From d3ca154c589b0f8ac547863dce61bded2f4cfc71 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Mon, 22 Jul 2024 17:18:14 -0700 Subject: [PATCH 19/90] `text_generation` samples for shell (#430) * Start on text_generation samples for shell * Add example for one image in text gen * Add streaming example for one image * Adding rest of text generation examples * change to gemini-1.5-flash * Add updates to text generation scripts * Using file api to upload audio and video * Delete audio_output.txt * Debugged audio example * Uploading videos now working for text generation * Delete file_info.json * Remove stray tag. 
--------- Co-authored-by: Mark Daoust --- samples/rest/text_generation.sh | 247 ++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100644 samples/rest/text_generation.sh diff --git a/samples/rest/text_generation.sh b/samples/rest/text_generation.sh new file mode 100644 index 000000000..fc2d7b9a0 --- /dev/null +++ b/samples/rest/text_generation.sh @@ -0,0 +1,247 @@ +set -eu + +SCRIPT_DIR=$(dirname "$0") +MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) + +IMG_PATH=${MEDIA_DIR}/organ.jpg +AUDIO_PATH=${MEDIA_DIR}/sample.mp3 +VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 + +BASE_URL="https://generativelanguage.googleapis.com" + +if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then + B64FLAGS="--input" +else + B64FLAGS="-w0" +fi + +echo "[START text_gen_text_only_prompt]" +# [START text_gen_text_only_prompt] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[{"text": "Write a story about a magic backpack."}] + }] + }' 2> /dev/null +# [END text_gen_text_only_prompt] + +echo "[START text_gen_text_only_prompt_streaming]" +# [START text_gen_text_only_prompt_streaming] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}" \ + -H 'Content-Type: application/json' \ + --no-buffer \ + -d '{ "contents":[{"parts":[{"text": "Write a story about a magic backpack."}]}]}' +# [END text_gen_text_only_prompt_streaming] + +echo "[START text_gen_multimodal_one_image_prompt]" +# [START text_gen_multimodal_one_image_prompt] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Tell me about this instrument"}, + { + "inline_data": { + "mime_type":"image/jpeg", + 
"data": "'$(base64 $B64FLAGS $IMG_PATH)'" + } + } + ] + }] + }' 2> /dev/null +# [END text_gen_multimodal_one_image_prompt] + +echo "[START text_gen_multimodal_one_image_prompt_streaming]" +# [START text_gen_multimodal_one_image_prompt_streaming] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Tell me about this instrument"}, + { + "inline_data": { + "mime_type":"image/jpeg", + "data": "'$(base64 $B64FLAGS $IMG_PATH)'" + } + } + ] + }] + }' 2> /dev/null +# [END text_gen_multimodal_one_image_prompt_streaming] + +echo "[START text_gen_multimodal_audio]" +# [START text_gen_multimodal_audio] +# Use File API to upload audio data to API request. +MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}") +NUM_BYTES=$(wc -c < "${AUDIO_PATH}") +DISPLAY_NAME=AUDIO + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Please describe this file."}, + {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END text_gen_multimodal_audio] + +echo "[START text_gen_multimodal_video_prompt]" +# [START text_gen_multimodal_video_prompt] +# Use File API to upload audio data to API request. +MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}") +NUM_BYTES=$(wc -c < "${VIDEO_PATH}") +DISPLAY_NAME=VIDEO + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +state=$(jq ".file.state" file_info.json) +echo state=$state + +name=$(jq ".file.name" file_info.json) +echo name=$name + +while [[ "($state)" = *"PROCESSING"* ]]; +do + echo "Processing video..." + sleep 5 + # Get the file of interest to check state + curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json + state=$(jq ".file.state" file_info.json) +done + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Please describe this file."}, + {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END text_gen_multimodal_video_prompt] + +echo "[START text_gen_multimodal_video_prompt_streaming]" +# [START text_gen_multimodal_video_prompt_streaming] +# Use File API to upload audio data to API request. +MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}") +NUM_BYTES=$(wc -c < "${VIDEO_PATH}") +DISPLAY_NAME=VIDEO_PATH + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +state=$(jq ".file.state" file_info.json) +echo state=$state + +while [[ "($state)" = *"PROCESSING"* ]]; +do + echo "Processing video..." + sleep 5 + # Get the file of interest to check state + curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json + state=$(jq ".file.state" file_info.json) +done + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Please describe this file."}, + {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo +# [END text_gen_multimodal_video_prompt_streaming] \ No newline at end of file From 99e5a11b552d0bc7810ceec345e609f493e1ab51 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 23 Jul 2024 10:58:32 -0700 Subject: [PATCH 20/90] Added curl examples for files (#480) * Added curl examples for files * Update files.sh * update files.sh --- samples/rest/files.sh | 251 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 251 insertions(+) create 
mode 100644 samples/rest/files.sh diff --git a/samples/rest/files.sh b/samples/rest/files.sh new file mode 100644 index 000000000..ae44b7467 --- /dev/null +++ b/samples/rest/files.sh @@ -0,0 +1,251 @@ +set -eu + +SCRIPT_DIR=$(dirname "$0") +MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) + +TEXT_PATH=${MEDIA_DIR}/poem.txt +IMG_PATH=${MEDIA_DIR}/organ.jpg +IMG_PATH_2=${MEDIA_DIR}/Cajun_instruments.jpg +AUDIO_PATH=${MEDIA_DIR}/sample.mp3 +VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 + +BASE_URL="https://generativelanguage.googleapis.com" + +echo "[START files_create_text]" +# [START files_create_text] +MIME_TYPE=$(file -b --mime-type "${TEXT_PATH}") +NUM_BYTES=$(wc -c < "${TEXT_PATH}") +DISPLAY_NAME=TEXT + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${TEXT_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you add a few more lines to this poem?"}, + {"file_data":{"mime_type": "text/plain", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json + +echo "[START files_get]" +# [START files_get] +name=$(jq ".file.name" file_info.json) +# Get the file of interest to check state +curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json +# Print some information about the file you got +name=$(jq ".file.name" file_info.json) +echo name=$name +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri +# [END files_get] + +echo "[START files_delete]" +# [START files_delete] +curl --request "DELETE" https://generativelanguage.googleapis.com/v1beta/files/$name?key=$GOOGLE_API_KEY +# [END files_delete] + +# [END files_create_text] + +echo "[START files_create_image]" +# [START files_create_image] +MIME_TYPE=$(file -b --mime-type "${IMG_PATH_2}") +NUM_BYTES=$(wc -c < "${IMG_PATH_2}") +DISPLAY_NAME=TEXT + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${IMG_PATH_2}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you tell me about the instruments in this photo?"}, + {"file_data": + {"mime_type": "image/jpeg", + "file_uri": '$file_uri'} + }] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END files_create_image] + +echo "[START files_create_audio]" +# [START files_create_audio] +MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}") +NUM_BYTES=$(wc -c < "${AUDIO_PATH}") +DISPLAY_NAME=AUDIO + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Describe this audio clip"}, + {"file_data":{"mime_type": "audio/mp3", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END files_create_audio] + +echo "[START files_create_video]" +# [START files_create_video] +MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}") +NUM_BYTES=$(wc -c < "${VIDEO_PATH}") +DISPLAY_NAME=VIDEO_PATH + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +state=$(jq ".file.state" file_info.json) +echo state=$state + +# Ensure the state of the video is 'ACTIVE' +while [[ "($state)" = *"PROCESSING"* ]]; +do + echo "Processing video..." 
+ sleep 5 + # Get the file of interest to check state + curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json + state=$(jq ".file.state" file_info.json) +done + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Describe this video clip"}, + {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END files_create_video] + +echo "[START files_list]" +# [START files_list] +echo "My files: " + +curl "https://generativelanguage.googleapis.com/v1beta/files?key=$GOOGLE_API_KEY" +# [END files_list] \ No newline at end of file From 353dc4fe860ec5a2f401c290e4fb5c580bcb4ed2 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 23 Jul 2024 10:58:52 -0700 Subject: [PATCH 21/90] Add other functions to count_tokens (#482) * Add other functions to count_tokens * Tested count_tokens --- samples/rest/count_tokens.sh | 143 ++++++++++++++++++++++++++++++++++- 1 file changed, 142 insertions(+), 1 deletion(-) diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh index 867e787b8..5d4f08d14 100644 --- a/samples/rest/count_tokens.sh +++ b/samples/rest/count_tokens.sh @@ -1,5 +1,21 @@ set -eu +SCRIPT_DIR=$(dirname "$0") +MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) + +TEXT_PATH=${MEDIA_DIR}/poem.txt +IMG_PATH=${MEDIA_DIR}/organ.jpg +AUDIO_PATH=${MEDIA_DIR}/sample.mp3 +VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 + +BASE_URL="https://generativelanguage.googleapis.com" + +if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then + B64FLAGS="--input" +else + B64FLAGS="-w0" +fi + echo "[START tokens_text_only]" # [START tokens_text_only] curl 
https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ @@ -29,4 +45,129 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:co }, ], }' -# [END tokens_chat] \ No newline at end of file +# [END tokens_chat] + +echo "[START tokens_multimodal_image_inline]" +# [START tokens_multimodal_image_inline] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Tell me about this instrument"}, + { + "inline_data": { + "mime_type":"image/jpeg", + "data": "'$(base64 $B64FLAGS $IMG_PATH)'" + } + } + ] + }] + }' 2> /dev/null +# [END tokens_multimodal_image_inline] + +echo "[START tokens_multimodal_image_file_api]" +# [START tokens_multimodal_image_file_api] +MIME_TYPE=$(file -b --mime-type "${IMG_PATH}") +NUM_BYTES=$(wc -c < "${IMG_PATH}") +DISPLAY_NAME=TEXT + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${IMG_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you tell me about the instruments in this photo?"}, + {"file_data": + {"mime_type": "image/jpeg", + "file_uri": '$file_uri'} + }] + }] + }' +# [END tokens_multimodal_image_file_api] + +echo "# [START tokens_multimodal_video_audio_file_api]" +# [START tokens_multimodal_video_audio_file_api] + +MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}") +NUM_BYTES=$(wc -c < "${VIDEO_PATH}") +DISPLAY_NAME=VIDEO_PATH + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +state=$(jq ".file.state" file_info.json) +echo state=$state + +name=$(jq ".file.name" file_info.json) +echo name=$name + +while [[ "($state)" = *"PROCESSING"* ]]; +do + echo "Processing video..." + sleep 5 + # Get the file of interest to check state + curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json + state=$(jq ".file.state" file_info.json) +done + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Describe this video clip"}, + {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}] + }] + }' +# [END tokens_multimodal_video_audio_file_api] \ No newline at end of file From f8b049f813ac0a926aa480a2282371ee67192739 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 23 Jul 2024 13:42:07 -0700 Subject: [PATCH 22/90] Add pdf samples (#484) * Add pdf samples Change-Id: I835c4805081af3aa6ce26a8871a62b5c435f18bf * Fix streaming video Change-Id: Iec0000da192231a7a5f97faabaeae9d3ebe64475 * format Change-Id: I51705e0f3b96d825952a3183bc55cbed5cb158c0 --- samples/count_tokens.py | 11 ++++++++ samples/files.py | 8 ++++++ samples/text_generation.py | 50 ++++++++++++++++++++++++++++++++++--- third_party/LICENSE.txt | 3 +++ third_party/test.pdf | Bin 0 -> 821662 bytes 5 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 third_party/test.pdf diff --git a/samples/count_tokens.py b/samples/count_tokens.py index beae3b288..74a9e4881 100644 --- a/samples/count_tokens.py +++ b/samples/count_tokens.py @@ -167,6 +167,17 @@ def test_tokens_multimodal_video_audio_file_api(self): # [END 
tokens_multimodal_video_audio_file_api] + def test_tokens_multimodal_pdf_file_api(self): + # [START tokens_multimodal_pdf_file_api] + model = genai.GenerativeModel("gemini-1.5-flash") + sample_pdf = genai.upload_file(media / "test.pdf") + token_count = model.count_tokens(["Give me a summary of this document.", sample_pdf]) + print(f"{token_count=}") + + response = model.generate_content(["Give me a summary of this document.", sample_pdf]) + print(response.usage_metadata) + # [END tokens_multimodal_pdf_file_api] + def test_tokens_cached_content(self): # [START tokens_cached_content] import time diff --git a/samples/files.py b/samples/files.py index f5cbfdc0a..cbed68a1e 100644 --- a/samples/files.py +++ b/samples/files.py @@ -75,6 +75,14 @@ def test_files_create_video(self): print(f"{result.text=}") # [END files_create_video] + def test_files_create_pdf(self): + # [START files_create_pdf] + model = genai.GenerativeModel("gemini-1.5-flash") + sample_pdf = genai.upload_file(media / "test.pdf") + response = model.generate_content(["Give me a summary of this pdf file.", sample_pdf]) + print(response.text) + # [END files_create_pdf] + def test_files_list(self): # [START files_list] print("My files:") diff --git a/samples/text_generation.py b/samples/text_generation.py index c4d6adccb..aad0916f7 100644 --- a/samples/text_generation.py +++ b/samples/text_generation.py @@ -96,6 +96,17 @@ def test_text_gen_multimodal_audio(self): print(response.text) # [END text_gen_multimodal_audio] + def test_text_gen_multimodal_audio_streaming(self): + # [START text_gen_multimodal_audio_streaming] + model = genai.GenerativeModel("gemini-1.5-flash") + sample_audio = genai.upload_file(media / "sample.mp3") + response = model.generate_content(["Give me a summary of this audio file.", sample_audio]) + + for chunk in response: + print(chunk.text) + print("_" * 80) + # [END text_gen_multimodal_audio_streaming] + def test_text_gen_multimodal_video_prompt(self): # [START 
text_gen_multimodal_video_prompt] import time @@ -111,20 +122,51 @@ def test_text_gen_multimodal_video_prompt(self): myfile = genai.get_file(myfile.name) model = genai.GenerativeModel("gemini-1.5-flash") - result = model.generate_content([myfile, "Describe this video clip"]) - print(f"{result.text=}") + response = model.generate_content([myfile, "Describe this video clip"]) + print(f"{response.text=}") # [END text_gen_multimodal_video_prompt] def test_text_gen_multimodal_video_prompt_streaming(self): # [START text_gen_multimodal_video_prompt_streaming] + import time + + # Video clip (CC BY 3.0) from https://peach.blender.org/download/ + myfile = genai.upload_file(media / "Big_Buck_Bunny.mp4") + print(f"{myfile=}") + + # Videos need to be processed before you can use them. + while myfile.state.name == "PROCESSING": + print("processing video...") + time.sleep(5) + myfile = genai.get_file(myfile.name) + model = genai.GenerativeModel("gemini-1.5-flash") - video = genai.upload_file(media / "Big_Buck_Bunny.mp4") - response = model.generate_content(["Describe this video clip.", video], stream=True) + + response = model.generate_content([myfile, "Describe this video clip"]) for chunk in response: print(chunk.text) print("_" * 80) # [END text_gen_multimodal_video_prompt_streaming] + def test_text_gen_multimodal_pdf(self): + # [START text_gen_multimodal_pdf] + model = genai.GenerativeModel("gemini-1.5-flash") + sample_pdf = genai.upload_file(media / "test.pdf") + response = model.generate_content(["Give me a summary of this document:", sample_pdf]) + print(f"{response.text=}") + # [END text_gen_multimodal_pdf] + + def test_text_gen_multimodal_pdf_streaming(self): + # [START text_gen_multimodal_pdf_streaming] + model = genai.GenerativeModel("gemini-1.5-flash") + sample_pdf = genai.upload_file(media / "test.pdf") + response = model.generate_content(["Give me a summary of this document:", sample_pdf]) + + for chunk in response: + print(chunk.text) + print("_" * 80) + # [END 
text_gen_multimodal_pdf_streaming] + if __name__ == "__main__": absltest.main() diff --git a/third_party/LICENSE.txt b/third_party/LICENSE.txt index bd47e3837..6d50e78f4 100644 --- a/third_party/LICENSE.txt +++ b/third_party/LICENSE.txt @@ -8,3 +8,6 @@ * This is the first paragraph from Shakespeare's "spring", public domain. * Cajun_instruments.jpg * This image is from Wikimedia Commons, a public domain (https://commons.wikimedia.org/wiki/Category:Musical_instruments#/media/File:Cajun_instruments.jpg). +* test.pdf + * This is the first 2 pages of https://arxiv.org/abs/2403.05530 by Google Gemini Team. + * License: CC-BY 4.0 \ No newline at end of file diff --git a/third_party/test.pdf b/third_party/test.pdf new file mode 100644 index 0000000000000000000000000000000000000000..25de0918fe997f2f90b6aa5dbb5ee7a6d0a2e098 GIT binary patch literal 821662 zcmbSzcQ}>*|39ZA9A)pqF(Z3rBu=)>Y_fOAmOYLlTSm4>ArT^b9eajsg=9!MPadGoQMMWXbE>7mgk09>orK!ty z^fHgz>Ni=0(Fs)$MF~E}0=Lmg*M+NtYL^i`RwXn%Vg3@I3Ed>`*{QxW=aPnX^SgAO z$BeO`RpO+IyC0S9XjxWy)F_0->@(>)kj*i^CeuB+<@w0JtZDG|am!_kO+on$hTM42 zqFciKd(O#hwv}soZycH0HyM=r=Vl4pIsKF}8LEzG4_2)XhAhe_>^U=abQwSMg^-;# zI$yTV_Nxo-=Sf}x9zRWYTXy?;?7CP{o%nYB`%c9~rLUV7t1!oFmg7@OeP5#ElZ}Dz z=|{px9Xv|0BEo^E#(t%_crpzb&-0F7_@`D(SUySo=?bfpk6Np|gYJ+H(e+%ut%oTXm4XL*<9b86;r&7cW4E@T(pHp1tB9d= zO+=X4I%}$<2LbzrRh7;!1B@+iS2wp`Nu~o?LQc<>hB0Rv8HDr2=&(%{^jaTcQ#AiN z;nrbr)YX9Lws!SjN){%4$wc;f?%VZ}B(yJ$>2+@xYHo6bv0b{RTL#j2NZ`@I($CXF z7KW~0H;F?qT{ZJ**qvFCX~5IjPmAWN%otsA-Lou6A8FO(NJ^81{m*1N`?i#9427Pl z)ukU5jz9Cq7#>Uy=0zl{T_uR!=u8}BU&+qMePPc3Rz{WkyPenJW(z}ye^Wz?L_Mns z4cbvbosBSUpWPFIugllw*YFX~W1r4F-o)*F~`*C8YY5mn?w{8|=X74SU6@l%~xqRDBH`%#VUMOYT6y~fRpW5q_ktlY5hhuLsvTa>WmcH>i z$+`Mjgc@9JRkMw##Ya>z#v|1zF)iz7*kPt_CQd)PqA%A2x-zoGF2=?sMQkQ|+tIZ; zm$EEw#)GS;2$={LZ2jlWG-mI&s&f_;FDJ-yGIJm0+tYM^9QzVkaE0{QxEsrjilbGu z*fCpn2iJ1n(T2{e&tE>auD)+CyQ?DbUba#_frZj_|DjjutAtBsyy~`ZqP5_{>-#Qn z;=Kn_kLg&Q6HPvJ&xo*Gc6rF3)W@jEEhWRs!)4NK55p8UD~qvS`IPaX{MGCZ4@ 
z7E=_kQTyC=4^lhll@gVE$+~G(ExgzBqf0H{zz19i2G>(Q&VXn z7R`TF(2bHlw3QT5DB~B3{vuq{-%O%r>$WTYj77 z{Z;%he?RqL!`HYFx|re8gjZI65jsj+BO=N&El;ts6gH#Wos|^k;d3Y6Uin(5BMzg_ zg^6z>7+*vz?VutbnG>-Z@OSM4?_l|yn+qPB9jeR=tVS1iv?9b00qyte8te~MRam)&|a zonK7-Rp?V3>?vl1^libTfjLPb+0zas^7n&Rut%{9rEuaIag#p(Fn!$@Y-7XZpF0Bh z3YW3C6Nf$*?xssp75RL#)eKut$QR|;61^fR8RIvo<_ypm3*VZ#dhi}U(l{PS!Ff~y z+Qr-5iBk+v#JkEWkr?D&b!$as;CXlb)!IO@Rs5P__ZN;#RJt?54#(LmLU;H=NHgIT z;x*LsHY2IbW!o@e_zz~E1-B2w^G!PrS57E4J45@FBQuvka7T9p6%o!vDy>39t~TjI zjYvXvxfv6oDk>aV5NIE_|X$x){D2AOxcf(mXgVkBL zC4d2x74)TlXL|?#`IR>CW3#spM)h#oNB5=)H>T+MT-JiVig3So>7u?$v%Srm$(dW} zksP&;5E6BW*SJLAk8k1^L73FQ$$A%I0F|G42F9IEO9&BaoTk%XvRsgRGUrn=bYRFI zM)~bq@a{zTh!JQ92RF=b^?h(WgW0Gtsrc!{JuHhDwCnZ$pF6N7?q-x|8Y5`5xGGkS zvCM;(wO$Q{(9cj$l%dz$p`!+kQrvski=c$H9^V#gqf7%V8e!d@iILnW16ci#Rg^`~*&;eO?$wkuKnN(Il-fH&v|&L7q(+7*IK7jfd)00kY_oH`?1|mv6-6hr zSS|R-a4(&+ug%mZy86V^FMkylDIzVzkI@nFYQeDpoJkXAjax`AyEo(9{0e$oLsCbh z(`o1MNQjx^1kOZ+2uU z=?!Myuf57vbiLO+ZJ(IAGWuO&sIb2#zth$1Cfql1JO4*m6GYK6DSsiW*V$S+VSO<6 z7o@OwY=~2orto_yLk|N!_P}+_=bL7sVCwtWuO*GQnWukTqgaAJ4bR&|dl0kL7h}#G zYLTZCWMPz7_)+X<#u8zbpjbkhTolnG*)@0BDV_xRZO#|a?`dE1W4mH{#H%tsEZJVa zf+_NJ@`hQE+ikNQ-p4M5>SgaYCRHgiGz};UN1sF!rIpp?$%%#~IpF6@efcm(0Bes$ zn>D%*U$WIzsnlg=7nQ~#`G6m*rh7#-o(PBaHjBJ?-qJg!2hI$aBpws3sd+6V-8jO% zyc19LlsdNc;b8E@4||T4vhP~7-{@MO72A-d<$V&V{Rn0qWxHow;BEF=4?nA|Df^`3 z(9y0NuXK-MoF=j~mwNsuD4e~uF*U{T?snWh9jA13waQzf{Ci!C2-AY_1>XC*D{*TQ zw|uNWyuRbgmQK0ixbfX*Pu|3OSiA5^ee3HZvw;VPc^Z#2&|KH@&6IL|y(&d>`1q^z zZ?dkN^iUU128AtE(aEp>jt8ZjEV#%b#`*llrHf<9EChzrP z`a6bStSdWhdgEW$8!Gx4gi9S_S#%G)@_92p8TC3RL9IVOCr!OTe=!R&KG z__u9G!=(C(dI-!<283nSeljUDy}--=K4bopWCo&Lr$bar6ySLsNnZ%6MLS z@3!Y5g~K_8?v)$Q2t2zp(_9kGZ!|5wAd}GkE=1t6CR3v(XaS0)>%qcP4$RUSmFeTk z3d_J)4XG5cu1PvsQvOCgsaHeDGuYw$ike+jv)~JP3Nh`*jmW@HW3L4qw>IfAXw}h+ zd}wdH5_oB>7`0p;u_8MePMSV*6>8KbpjC4Di;+jwk{sspW91O-p5VK7c96`zxZ)n; zW9&A)=7YH92?}&y!QSyh_$F~B`4m;qn5SF7or2ZpKSF48$+$8`^(AiyC8{_&NRd>c zQ;|i|rKWIBElQ6wN4hqYRi&GJfe=O(e0;n}SxkuD)cy3pktZv}cPUG%F_e^{NJyR1 
zkGtnDZ^m6YgmC+WtR-@01b@A95&xFQ(c(uEr>CNi+cu*c=CRZtpVeLeT4v@> z23xn6$^(D((eF9n5s+|#eSaS745wqkVp{xSLwtsvI#FvWCJTZ|d*CFg(NyuMsqnb; zw&bh$$X89Q#rqy!^ugCO5IFc1zRxDMHZe+IT`w5hMFixpj!!&;JfB~^3z@yk{px=D zOnvwptyIE=V8(~>&;9IjKfe#FsD%?nGGpg)Xb+fjqAipsNhOp%ZDPw-7r3e+6dY&Y zPIg-@Kiq2cRVQuX#~_}h9^S^|*F*1r+<&`7taE^sVo2(?RJw3whWJ>?wWlRQ;4U;) z401^yamL&NDY zYw6HosB^OVesQ^|Opq&dqhoL@W|utq^61Z%UrG&QS8rH zdw(YH!i7?(r>zVLXVX3n&ekX~DZI0`8&+RE9m|#+XO{eEPY|Sot99gE-=aTF8~pCY z$n`4`9P}lbmyBsuHlSt6!$x|7eJLjSVtF=?-Va;rtPw@&fHD2LP4ftd+rrk^J;*t+G}X&HTSC{3{{jcZ{1>H zbN1gMZ%E={PPOK2@(@pnliuEm%~f~XV4sbS^^zEdRsh(>NjJA?bHAC$IbIJ z^Y|QLnHb8~Hg=upNpCl<_7nFc2eWQ=QKM(}w+e1PRT`~1OzniS3L47acf5|1o^oJL zt%Xg}!p&w>nwL=W<&ybx4Q9I3#PtY2i;aa%I;Dy*zADF$4K(If1b%imv^!M|{Muy^ zgr|2|C!1SuC)X~MS}iNP*@$mAbqCFLF>oyGVZLsra0<$c6fs_p(%JUzn1~@xc&IM; z{fbCLWR=o5Mnb~aUcmG&sPLifw>~C@Q>fg`@y9RwA=IA|-DpH83k()RJ96bETDM;n z9K#VYB?%2IURXk*4{EqrU*$@9n?zEty;I7;4~k8B6?X70Do{VRhl1|Kly1&on)pDq zujm(ut0;30ciN44gW`jyB@!o#aWAB=LY>>(CmRvMnQJu@VdFX1lNiDkY4H5&LE4E$`==(~ z;RtpyN6fpO>oAAY*bh7w0W~CZT;E+2oSXe6;Zpkz`#KleXEA@Kly_P7keT%wckWv_cZc)>5TJ!+U8_+=cKSvYjLJ(QA-T-HCFtAJdB-dhh*yacXGCK4aVU zhy5PyWij)q`9#MpOEu2hT5GngW${E$Z{Ft*)f^8ytqP9kATnCenXDEczL(IL{?i=1 zLiuU9a@;NeN9j5b{-sR{9{C`;E2A&TKFM8idRDcV&cZ(RUFbgKM$O6fEUuuioAjnS z#x&9&XnppabV3@2iJHBYj``Rozf}pPGDGPDMc#$f@VkvF4C;KPo#|e|XrFBm)~i~j zu9lRXRT%}pdUkjxT7rPh+X^q?%CFsZY5LhB$8V`OnYe@Jh$XD4>YfY0zTNFq))$^c z;K&Y;&+Uzaes(dY-mgRVF#H)MV7Bz6c9m52^g->h7REU5_XH5RuS2&) z3xn6ET$ah|7@O>e>=iLQV!01>>~(IW_u{FQ8jY4SCJrgmVUPbTj1pk-4#?J5r)D>K zyCG1L#Ar&6siUwsv|BKra^3O)ULbwh!WDe&(;mO2BUHHD7EJLbgKT2X zdLJ>(F*7!Z1#aV9hWt*+R=sA@}8i7sziyR{&BNUXTug&g4RV|p` zb64bebVu+^uxwf{>u~HW9&|AdVXa72&tV!o=bY$>58-Uae_W5P#h8lyg5dG)0B=o| zgX;a=6UO7=5B8VSuYc^#(mJLk@KVZaRFd0z#c_Op{M)TX&mZjb+*i!6PfgQHN0~6| zwSJ5!vu$d<+xp!yf3-E0K!_ckW+gf2@FO^r_7`cuN}m)3>wbNWfWRp3eVyS>(=EKO zHn*R1q>rWr2q%b%%`>PaW%Ttt91wjcp;0}juv?l-#&%O};>M@XY~y5B2{^cAM?@6w zT&d^xO?5J!K6%HyqV|i{eBv|SFURJDAv}o+!gm(lhnU0EPr8?%eKg*){P8S$n`OXk 
zd~v5sER#l64_Ah;|FI|W=DX)^^l-%NUP%BS|K%<+@52ODe!)eG?)VgT;<*$Ywyy*7 zxr~;d0jK}T9FaY`^6c5@Hg%xcOR&~hVa0UNE`DT*b05#YS^cUZm2{^CMc|P z9DVVTe#1Z@z=O!S$x{d0r4CzvXjn^66CKxOLQB`t`BPSLamKD{#I;9jHHhX4E4(&X zP`7h8(i6Pm%l?z7G;w6OsW)exL~JEzF$~V|?63>p*Bf)$^zAk5HH(&zaJCoDi|ori zwd|re^JalF`W88R`DtVdnh3j~65=TuGN+!{cUdaa zk2)mv2jbV7*BA#HHRpLe-(N}ig`mcaRgpLRnDEJ3e^X1r$n5iti680XBQdjezvo7oln3}(70Rh_ z-9GdJ`Wk^u;C%L5fe-U5_>od|%vvp3W|xS;}Pk2%#nc%VXmZuy|xNXY@t)je)O zxqnNc?jZo-{w<-phaeQ#y7;rAwz7$}x#{_$a*w#7e}B0U4EgAfVCSm*^AV?-tBK2* za?eDAa%w#^1NKIE1-ZGPoU-N*Ev;Oj{D96l}&(&K9d{DDgMyKS;gE*!v2wiy&ckGf`7MO1bBq_fc4LJXCVLBok87@`8nS8fw$Hv z>pQPitS+uB(?@hlD>|83`D?Ucp5rAi`RE@@N~Kk0`q$)x4O%E1$nJ-{z(YhG$vr(; z$2-A;KY;rk7oMi<)qm9%fun)2p#J~kWdjr<6MdK+PsRxS|M(gQdn*kxTyOJ=@r?!; zABKkhzg>zkw(!Wk7`inregC%)&q!qxt_f~EsGV2GQQ-aG1;=`dr7k9QJxR>$ZX)=9 z6U0K*Mue~=Y@xQ7KAC}<{TcMrIvfwPb799LP zS{zUntPr7U`2-{7m%hNt{|9xL5^SwzWjcDc3=96h8y(+@RQrgl$o=qBS3B(g!|WRv z&B_aetI+55rn3Jl+5y=S5s2?US^b6yD|9qC=>PWxp3|!=3vxV6$8g%gFQL2mo+}q| zdi3oyukVyeAN)o9zuT0VY0Z1|g{3?W@z=0ssf+hv8X0{AbB5{~QckoN%m3S5KApAln5X`#1{K@wJ z;9T3_Y$0cP`U%kXWA+e&i$Ks$Iu+-%1K|?;@4>~t0Ji%|(=JsN=9Uw41;wTApewN~ zzsC;wKR|5*BU{8wUX2N~UmXiZVOt#^U?Wh2j{d*10ZaAoxT_i3&5mDAm4to42D})Q zs>)IA0+zWj)xX_wktQA*u}>u7kgtk$q8O1Y0Wivfu5#7gfBW)6bBhFAV)geLT*02V zuJ59R^&)>@4Ki&rUCIBxMg@uX0vCH)6=Zyc>*dALFe42>rG-@+TM_?;iNA2m=a}-e zcrlFw$v+9r^g{v1iH^To=jqP|LliZmu=#fZ7i2BN)Z0uyqVYi9%2|1dI9i7Fx>xu2EP$;>4dh zia-T_@!O|yl+XdY3!sBckB{#E5FJIZnNwvLFM-@Z%3cWzDC$U*1>ix%&h#FI>wgpM zXlBKVd4!;kU@9>50+0v@01gA=7fS^HW=|+=>f7mzSo$g1b2!U6ZEZc+!j+qH_*Mkk zM;P*l7up@bak%Gkz?gj(1;Qxo0?u&*zQEhj+bb-U$#$+hR-BL5A?8;=)*#k;izTYR zTo`!*`cbZL=KI&oYs9hU4cZ?lnIB_5e+Ll&vR+W?c!IbA$1p7UcoCF~f|1^=WA)`< zy_uYB&*f-uU~1h~{oI3%bl}QucPYaC^AW|I#*`P~C{Y`!a!!u-wEi%5xU!fox4TNl z@FbKS1q%Z>S9}`oU>FtX<}kC%xq~?{zU=ySw|70CCNnj|EeTG+ z$3wn31$Ef76j|?;C<-|p6tlS}4weQp`L6e0cnCrj2{h}I1Ix24`x5^qR;tA z-9mxNJ*+FfOak?_U`kp7D>5#>h%2PF$^~3bMPm(LNxFe7U269Z%_xrziH0eX`?l;s60wD@%}N;rej!pD2p7RGQs6Oh2Q;G4M0 
z^&81LmawWJPXEZua%Yfzi9tNcf5IcOkUBMXb3wi%#6JW9F4WPzi16Ba02IRP(Y^ra zph8dMNW)bR;7KMLwP zK>;*VFDl!EVAcLpVasH>cl@Sh`Ku%$+0w`)W<}iRv1)Zi&r$n*Y}Okogi6LVPJA~C zppLK|?j@xml^l$<=na>PAxDpnkhy|lA7ZeH=u^Kmsk=c}u)?vtCpL{|WRgQG1;VEX zB)MyOagfZw$`Lt!L;9F*O%0mno=Q0!Iojj_?{f z9=s{QA!(_9&Bx3k5n!-(j7|=VtY+{v%%aV zIxN}bnL3?j^BGzNv96y~DjjX&=dYyu-Vn(RM}R@DhF=&iqMEiMfUmswg2#0J{`s(J zfp~@YoX~1}>>v~l161i;>)73PXQaxC|$sk>Z>gH#AY!{`*xb;We#R#T|5 zLvS1^{u(Kk*Bu@L6nxSH0f74Ri~;g>73i+KyhS0MQNzH$A3)dNxwD3dAA&IY=p_}tgzU_{(1VM$LHsW zslwdD$f$lS5I2aMQ^5j7>lqIKg-y7OBQwt^(}J73pk!vyCz*^N;J45oG`tk?duy5XcEL|aIgOPBrer&c!PEIC!)Ebk!fKUGI>F=a? zRtrPrGde9d*l6aZGVc2)sL~I5Xp>Y)^f=S&ovBBbSn!Ay5Q$EFchEVNm}E86%hlGN zSnC~m?_>B|R6JDzN%@l;3sQqy*bKg=9fQ2sTaUz6P=tqSA!+F`TF6Z4WOObsCTvzl z?^)t}rpj!6z)x(`6y|C)Naj=#j>KH;w=!}bRJ2y zKS2X^LC&acVzV6zXFC2B9-avb6pbY#VcLO!Y9gz&D)A?TtldV=Obj*Ra5&KtUJmuJ zDZntt^V7UCU!`i?!rEzRj{AU^358`NNdOr1p5tMFSmY2hAN`IRA{_D+AP%0Tud~b4 zO_fE{%DhW01dF81TXY9fzw8EVw@gIb?h5U$=ZuCt`Gog~LxaolyK1(9wc~?iMI8(#l*Tzj8IaG% zZXD^gi8yD#2k6;rK~U;rBUY8=OO=mS)g_GdGKN_f6CT%?H$mpMaN$>3&fT4h*1(PBut|Xh zvufJK2be%ui#TUy!+|W;)5%_xyG3!Zmfl%;cKlDB5OwcgM?CT#r2OM;@f!?R0LhDr z5hpYY<89M)8wOHHY4e*6Huqk8%*()l7)Yen%GjriUp8k4-3fU=* zj77|ikvejJ|8lg#G{^6mL5HI=Sm#d9MKF3glX^J%hAM}S$&%laecG%}SEsDanpLU( zkN(ksvNBb5*pQx(&L2Sssx;K)IV=W+)zn!bt#)28!SnzkPL3MWikvE2ftz9MrF^aw z<+G|UpCZGCCh6#jxso5W8d5?{L$xaZxZ@t?HAAvANETVZ+65V)cl#6D-}u?f`I}=% z8i3Ao+!+;Zos5;7^%q(Zlg=1F>a2Y`oR@G*65@|={1$`H%Y$OD%cNQ{_~pWo&>G8& zS&9hDHjFp%)tudeAdt7DkrDmhYE{ z(ZS6aE_Dcoj$+VsA;I)Ts8C>2Igr?EUKY1024xxY)UT-)4RH^NWRWIt0a}3-0W|J8 z%oGj`-GTmyJJJY8R{|;F{LmWifES}^4GgqpLgcASNJqGRR;U(NASOuXg#UUF8pszk zB=+>MLxln^k8Z$|z~)1iXL_=+%ip7aw$MCCc$H=TeFX8=ZG_H;jbMgdU2OF+AogWt7yh&9H#=Ck0-~B92eC6CZC=$>r zy0C>iQqMkgI`;Xe3Wtfuh{64(Xp#cVQG+WJK%nht7t@!*!HEVL zi}EwqA3VzH!N$?Q-^73iELBwO-}6=G;AZgkz3$NH6=i=rc>bMD0ed%FJJgyT*$++Q zNCKt4whU|mMj2)a^<%0Wh`}Xsd0172c^}ij0JhdF-Rn7 z>%bNw=-o)1dOhZQUIOQbm_RxY+8IQoP@J<57y=c~S0Q^;QsQ$1jbpRzHNG+bBM&m( 
zS4U-#6t)F(cLjh*3~}9(hT>N_a!y?NMs`-Ebcpp*x+d!CbbHi7Bk$+Rq)Xg4 zlPT=kIH>h1R12_8M~eqV^-BT*TpmlG3Jz;MNk;S+ng>eB4Z z492oA%BSgGNFrAL91bEtRZpG zN9QovbY38yhtP}%8FTbj@3|+4$Ab}fGlqNY6}%Sj&-S*i2{%cOpZDrT*!f4)-`&F1 z$~MYYM6~ZBOlbMM^UReT+fVKW7Q_TxC=&|hYhKW5;(7gr>^NY*5wgI1kOT*3U5w=^{6qfb)bH|*f2Wmz_b48MW4A?6LLME@eD>*ISCDDY@S_Oy1g z7{DWhq|ql%F2%)wu*Lj1-I%FBBjLXtbWTt(7 zNIUGcLnsfEWkFxPlVo6c?~q49__Mix*V}Wg_YqVajoh9y@s~UN-rY}si6JSo3f6^N zI7z>M5l!*Qfl#;sg_a}JpC*Xqn}&gs_0Z=|R}SNm%8HGCq*eco@diSXk|l@nc?UzloH^eTDh!y`I-p{dnNjkHmCRJ|eT)UycD~9*$Qa2MkghI6+)u<3ZLvg(u-QoCwUbd z%@Ys19~M;qxbJNT42+CcES7LlJp3`>;pGpw34uorB-*n$3R+EsEv}@(P`MBiTBMfF z1@sSqA#w(`Scp?QGB}^t-8Tg-ayE+^hj&b{9WscVjoR?ZDh}I3PO4Z3zh9p>^Q-E= z?~MWfMe;$sWb2~5_lLa(rQg9)%BW7c zF@dU7h0DSb-j&AtttB)swHQM3>MZW$vpwIia1_e@cGLC-SC048g7jP#*tak;>mW<# zuREcG4FJD8y4Q>bJc3G3ZB#eMg>fJAbfv>O31yTz#V;x-AX?+^Q}8<$kV0C4wJU_4 zuHw>$#KD2*mPNVdexToWrr6#v?54Q7Q+x; zk~73gc?be?;G`=`g9(svMs78%Y-imD<1;h#?fo)A6p35KN?p|+&u3wJ!1Cq7*|^Bs z58hCU6$(*dG~^en`oS}c?AS=j*1VqV>5iGw3TFO%bx-jRZ)Yjd2m`=G zVNoa2Y>ePK-%pDIb>NX^+GEXImA#F%XR^($to;&{mzvQLrPEjV%_5Fs^8Q%Xn-laDb`ZAdkj2KMu|CJo7{RqlUdm>f9h%;ft~`)4iAl-GJ!Fb%N4x=PY?bca{iifkNNGxj zU4Z?cY2*+&Ql03T?#;<|Ve852!Hv(qcVqz0fC=(^n^wL0FP?C{y9oQ74$YTdI)-Bl zcIhjC>5$tO-$0F*M+-nQagXlgq6~EaY4Vu>^ABmJ`Vzokn$Vwz9U~!TA@Nsts?16+ zvsFi;6i;hVU@Y-o?hEGCm%54wGnt{A#a{>qIFYgfU8JymTiKM_OVu*QOw^(b`QsD7 zacCUek$V)u#EIKD-Lg%U3;koXPvqten5r(i-$f$wqvty7JEF`h$;uhiD^$HA--P>r z&m_g*>*ca}kf1K^Ip83GT99PiMcsYocJ4qT-YCKAMdHpFq-iJ?If8AI6jBEX=PZy7 zER5i#{tIMa6dbMmu7Nw-P?OS|1Z&bsc%ZhE>%6Lhhl14VhLk++RZg$a@uCfrpiwcwCjhu zW$zYahm$FC(0i@|^ty-;Rm<)dEfN$~G{|H!5gR=LkpzoB-u;w-WMB z5}aBW@spVqBp$8|qQZIa+?At5aS=SZviy5;2I3gh3Ro2HV7;;B&6WF&a=?^TKqFE1 zBq)~Z{z)k>RwHsp;4F0fzWigjZj{W)bybmhTMlN4~RUbTE_ z_V+eWGlppGwJO||pyZ`ECjpSaKeHgP_W&HK7>>0Dc!HJtFT2I0hq{#`BCJ8_!}?}L z$+Re)g$Y9}^&fLecR%(q1F?K_rceSaL(k}FZZ?#WHd zDhV^QS!(9GpM`a9Gi%V$H*2A+{Ib``LH`9{u)1l1tRyrX&4CXy0>OU(p|%K2RGv{% zO&P0DL;CR|g*mYF?cH^ZHcpV#V`DFBAAV$CvSm$p9_rz@;XCExtI=-QGzHF*`{>Vd 
z{sURkfUApSw$%cJ{a^X-j>P=hzM7I57YRGOtU{hUBTe`>Bm(_ppz)c_?_!+_%9*&O6WAcwHvWPv84d z>N<+HkkrtDrU@5j0YQ3+6m-6;BVI?hpC|h*tm`s9g_?Te=*9a!Ai8w%ER!ucr-Wqx z!R&tQ*JTR7yB&mN#>#_V*MY8xG@9<>i?h3VWLtJ43kuk&?}o}QPIsMeC96ElGN+cQ zgU;k4eZbw}BY4kjaDxr8EcnQ@otNt#KUz_^d#)^Eu)#O*kBup$PP8 zkm^^to8fzn1ZM{v(BPs#7Wt~YzW@}`x#lPV^~^GofEx;_rc?sUz%k*OhDM?;&1%5x z=kS+MNHOi$1x&60qa4zLqkLMPtN;wn$@0Ql%tDNT^MNlu{n`O8`K^pv!${`ejR=Z| z{Bz^wsgKycaI}XF@gTAL1UB6zzHQlyeN-dXjJ5BZBhljF5||b1-M`%$%WxJJ*NR=6JsI22CID;XVaTb48J!5EK zQaC$Wauyw))2*$+xqjx>w>D+lq%)gKCAz3&`1sJvoFs>)t{4~GHTdiNQ{QVj_i`Vn zV*^c=K-Yf~BnAlP6!EeE1RY4RPU1I=tDJ8YD6p*+5xKlf9!(#<(BKJ}Sk(ImgOE*; zawm2w8TWDMATDQl2Kc;R0-$>Q%NM6FW=Fsx0Eo+^lqu%qz^WI)%pVpHl#eB|NyywI zh-LeYCRZo z0$^PQvNR=);(dS5Oafic0cMY(|0hG*ZlNa9D9i)U!km14@vE#G`#42&-+#9Pyx74t zy(cCg-#STFN`-L$LqKFR0&KMl0QABXvD(G%6#K|E(F)%6?aVmM=o)a+u2 zZU4|Y4&LBAwTZg&*Zk*J#3ukIa_!>y$Z_BqHid^xbl}hQX*ULo89Bnm4UIVJRGrHUOYk^jyCam{{u1g0CD5D(aK1c#a*0pLt*3*80NzRxe*JNL zNb0e1fiqXZxplbU=T<9BOjY$|H2Ft9M4l%B+~7S!E;3JEft*=w!iav{K!3{Eo>@dA zE#c@7Jv2}PsF;SIB!G%X59GwHKUJha=l4>_>`B^dCyeue;dCxnugmrOzB}Jhrw&N5 z9spu}^;AWkI$I=tyMbZh_GDaJ3TDY_z?9o4tMpe1aAi$9`{zHEzSuK)H^ z)ZIwHnUeueGjN_*0=h9U#yf8~i>wR-9(vkU52fkPoctGw$O)q-{b0Iq9J~3(!=!Z% z_K_J3-C+tw;UO3_ct361V&e=nn%EK$|u={jlOSnLAB zWUU&_?;r`xleDLfqB&1Gox9dO3@p8IyGyu1gA;|P^k6Td+2CUUlGbW z@^3<^=+DeY4JdO*B?~zm>7#wV?KC0D;O?&?`nkJd};aek!bkGHw`XGJleHh$Y~as6xfbJooPV6qnC0{Pj9 z1^t+lD?t}G`IxA0EPjC@|KjKpLIJeU>$R?BaA$_Twr4Ln3B3SRhyzGhITt6pAeSz& z@q2P2g>fd`3L2oyI-AhtGod0trwiXsTGsp`VtK9r=X*}R`JLi5ohk-N+{O~YN<2zv zm+*Pne|}z#<;JW;{>G2z?^9vJBQF*_x;_((YE0EgNETE)R&;!T-gL+GTDUbZEhM)) zF=Sc&I4A>?LCluE9>A5O#84RaDQdSFY&cLVuWz;RPxl{4j*Rnp^!QE1_u4`s3gk>> zhBHqp!%9uf|Ejk^f+JBLWWm-?OQGko8A;_Ib}`YL)UH!y;TrD|0s0G)iN1Z0M2h|v zu+6%6s_kDJ3IvV3e09&B$lmms@;7dZXka>w{5s$3g+ycg&PzDqtIRo&0~Z`77v1I7 zVw`}1;z-yGhbv|F42%#g3{Ev5!!y4v?j#?MXnGfTXQ=<7cH&Nmt_g3eM21g9qjcFv z3etIx%=owAca3=93uJ+9C9&xJN}2v`C*B1~qd%txdwrxty~bpZ;OI>hoVTaX0s=zW z=tw=t4I>^K-3K1|q`9Gd@mh?74ti7X(M(pnejs6l^S#Puo|b1@C(tkYVlM+I2(XBP 
zk3u&*3X0;B-i9JJgXFx+AXE85L;W^!>V&vGo9`j zOxZ=y6_&eDCFS@O#LtF*Ah9w`Yma8XBR%htn8$e26#p&9+8Uv($GfT<`}7#9Ws6O3 zqHFPbw)>>k)Dj}XPZ!g*zZcVUAe*=@hzm`*N`>Q?#%o7@_>^gD*+8ywj3qz3`IN&d z-Lq8c^J;=x=Yb7zyvlc?rJO6qJPJ=?;pgG3Wvh!V5KR48enl{a+v6HM2LbfIbzKyP$azhyahbC; zfG-bfhax_Faifbm&Poj_^eGUwmEDHVH#@1J=Gbzm6W}BpW}u z*2LQ_3ANP`Qyd)ql9TNFOX48kLbBD)z~>>>98w z=S9@mJ_`eU5IE!B+s0tw`8aUsO&c3}bV)Nhck$CCH#_vwQ$F%;-d&Rrw>B2@bquFa zfq{h~2~H)GI4LMB;}}PJH5&aBkCYJls{V+6eX7|j#6+0ta;rG2_#hWt*Q?_8qr@9T z@0t2)Zlf>rJC;_xz3q45yuJr7b|U*jb~8pB5uZ-Zdr{u^Y8+@Nkcj*JqiUAgAZH(S4Ma%@*j zn?#Rk8s%QCw;zs=w(f%b6c~z&O3)u`ZVJ z;-Br`F5kftlr5CoijjINTz$+js{tZRxgcHRx44_y_VjfND${mZ7H|zlTs>lICGCI_>{7N! zD~EmO_&Mg#i_<|vHo`BE{F<+LwO<=m9}&QtICQ`FN@?E8Qj9(xn5>qak_g{93jawe zn8RP|Oo5llu&JW)qe`+wBOg=U(bwj8BA2$D z;`8IB=4<)BPiMedT1Yx-qPQy{<0m(ZMo2x5lJ~hXtbR(68T*Ag9Gg5XJra%%=&+HE zLWe|Hny;ml3U!6d)+d8C7{cOjqnlTXUUo~V_TnQC>n=aMg zeK!w&mi>n`gDt1r(uGTsEyM-^M6f^?sV}7}&KF&y@k>0Rw`>$=VuhCm1QH0h+bz4_IZ)l3*M8*==2Nm8&&!K`8!n>OP?8iPjt3XBXWH6pq3eVDhZnzgXI z?EV9M$}j&F??5%>9r{DFc3UXmmImZHidIBNr32n)Wp%iiPTxsyzr41~H=~NYxU2yc zCbWhBjxIa<{^51Wt{T3L4$3r8S{UHHY$No({93 z2QXPvq@IW;99t^3F_t8zzqMdp&yq6x+Xji~7VW;A0k7zxzUYVe)zXSZ!qad6$PQ&g z^xH@|?`nsB82yWLc8GkRK0t=%*)S(3?m4Y|foc#}S;(G}PuRKLrQG$xFn|*IB+6|4&8ZKPlQpLC++}R%ouX+1v$6aRHP&Ea) zlpe4da1*=@K0Z+ARDHOR13H=$mhz3|qn!5CWaa@-(BSYMI?7s2^r&0A+!c4uPD1Op z^2>4vfy(R3eTq)VZyW&5T1v7E2rBQ)9t~j6)(E{zUgFAHl;Kt+|efd3kck4MmC;5hS<)z+4G0U0r((*Jjaq$yu#Yu2LmH&w09D~5d z>Ogk8VOZZ+kPU|qO4nM!xS_b;Ru}%hP9PchiV0VvHQmcwK?X9RZjAquA zp}cOw(>wWm>GRehj?a}y@YJ+s$7;sFcKCbu8ZPbMP)o_2$*fp;p-x}-GUcF~FIH_| z>o%s{J4}JPGv|l@UM$+L7A*ByS~FiFT(M~(STlmi7@(P+$1FrP6KraqHmM0BgwkF% z2SW#f{B`Que4m&C$UsDEI-3_@0Oq6ZrQu6 z)nidYGfCb9L0H{}zlTY2N3KA>>*Z`_#fHyCJvijb0znA21^Fpe9t8G1o*~wNGD{*- zdpcF?>~H5fMy*fVH3UB!l4O<89i9>JuTTM{#+?L3Y@uk4DOU<~fIc!-s#rAZ!-g8Q z6JsiNK#+fbU2+>~h|z5rKn=2NvXdKmt<_ptaY!v~v^HEY=`|G2JxHHZixPNZ^CApK zSXbG`vK1~u{Bb!ALY||iP5U4!?an^yfjd1_q9b%5e47?lm@wF9Si&Y!QZxAVml z$C{ltn)P3@5#wK8AEh6$g7(OB@^$x9&JByDo#dzGxpI^b8R`-Xv9*;7#1~npkN4Kt 
zdj-g~003#z2EqFK_0oW!9ldZUp|PQoh~`-4rx$NOfV)DSy7qy$i0S)N{>D^vm=t(X`qZgxnaT>f{CcM8>++U#J)z zFdloDeUj9@>>=HP92<6z%wW-PsQYLUeokeVSkQIQ%5C>1moCZe4fdd<|2Iy|brEt9 zJ6%WY@z`S~;HKQ{N^b4T?^3Lk3_x)NT%~w`xxB#%mOUk=(#wX=}GT$fkSEoqhX4aDc9?80wV(1 zSu{qdeH4Qz$0uRV2Pa^1LgwToyZol!n6b00p`_8taPpX^F%%FQVTIPso}% zB?Icy^5aiWH(4ti`U*LiexBQ^gr~HBjw9Bs) zT}g))EY-;0O=@GchQx`(@_WG+@*dld42>MKiK2qLCh)hwL?JU4t77)jSUAU}hSBwN znkituCHxHclchq+vDd|Ao}Mq|BTlpox-O)Ut;Y;K$WL6E5UFo6I!@&K<#Di*-46fr z`mmf?@E3UD7UN%FbcAiTtOX^jH;WQ#uhK9!d zKy%i7M^O+jQ+@5cSc%A^R^nm@4)sfCZz96R%R!EL1x0Z8$jD;=tXhO|Wl7)74M>4^ zHD)CR8%p%QzO|=qB*sZECC9y~{BWUqueWBF~2+_4UOq$L!=Lbz)1* zDL1i;t5{71Ct!~Fb|6kYh+|U8SU0-Q9s`=ic8mDJv644}lHsT#%)#Wo39fXl`>Q|G zY}u9Iq5C!jt~7)B#wipKpNLmQL1`g2P(JB79OVAmyYhBlB9B`+YLEPs{dbFFop)$r z3E|$`>~2#yv+D#t=9Z7F9b9@Nr)_`87RZ?VYZx0?Q*>4cdXOA3*u#NxqOwT%ijbe> z&Zx#8i4*Yjel@}AiPH_?7F93CA=97Mv`ERtBJowNgi&W7rIE`-aMp#|ZB<0ElrRhT z3oMpO?x!=a^fByWNHC^|!l2z=S(dvWAN0R4hk+=V`Hxt`uq(1>Bt2lN8x?Za5rltr zYJBhsZhbmt`{?&j34d$Mj8guwSs}bp*NGv|wd<;M;yR!&F9GAc1oH|~hGM*Bn_b|5 z521+JdMO@ZW^_Eh?ef}pbUlCFGDTb(Kj=HvXJLrE8Cl=xSP2D<=0#q;W`#T7Ha6U6jBCEAra-2cy`C(!zLG zAdpjiKqw1}yvl!*vzsOfKhCPhcF`M#%1W<0;iq``HP)_Ize=PIDn0Yv=86kWvVOrI={LCeVH7U|6PvO+u51||B=?YxS>W| zy&&PAY#ckpXFEmL;qiV1he%qT#JWj*s<-D^Dc|5o@JlV{W#c`~Qkv(%2F`9Id^Qe~ zYIC)|d@t2*f^T)F%NY@Bu$yD@gPHZhWnYn6ExTv4hTmd2K~dOsyX7ewMoI4ilI{ff zgD3X;(@Dpt(irP;ncwH;T#^4b5G zKatoNyk;bMp6ZhpIqIk z{W*csc7MS=!t_`@0o2&ykyCR3uW)^cOW{i~R8>k5rUz}^{E~b^ zU={uoGhxar>NSxKESKpEZ5KlTf+y3Xo#9PeM{TFCfD9AzoL{E~PrJ~|HaPP=z(Uy@ zEYp>-R<64|yR4~%24xSlTHLEgb#wqIblsx=BArpAW59-ipy5JZCwpEW&NK>7w&#B+ zF$MvKs8hQO1V~plv0?o6O$F@`q)_FsKhBb3MUMC?<$Zbtbl_;cjZ8|^1g}e;+MI-Q z%llC%pGKHEniQ=MR&>j>#Y6Z8MIZ-MmE@^C*b~#0ttv1~nN{uh=E1e*TSackdk3Fk zO8fF89)^=)*-FU|*hXwD{}7^IJ!8fQXPLutEu!1a_0iLnITVxN!~bn*8S*m zGUkC%!{eag3uZIrGa~?=GrqB%4C613XfO5>sw^8+8Pu0-m#22({x1Nd6C%-aR;1~n zp-JNvs&uPUb&I>Gp5QTY`CP!tMDtA)Tx?%F7$3xmgio^25l4Sp0oBy5na?E&V6qB~4s`n&j|Juqf z7N2UCP9?H?90uMec7*T1{*to4ai}vT`f*>WbLT;!zoDG?i0z9bbK0FW7$EcZ0BC2KVMEq^Xk` 
zs4i{?Z@sjm5`;@Z%Irfw%njg-rSz&U$#xY)!gU-W=vdSJ9BdBSlUon3NVwsPOE&mJ z&7m@duGVQMX@sYTZ!s;@x*yk0Kb%H*v{_nKASV9s6ttmqBP_Z; z4P)|sBgXz*>oH>r1TK%)@J%>tKnfrRzGwoz27$`t`<21@LqL<#rh6uI zz_(V<{Ag6Y`viANGaDVF8oS$DV46$8%R(Abj#+I}($A?^u8RW+OHlxPN8D)@% zgynO}#uJdR{dK{&UVM=|6W|xvB0JfT!PUoQzy1nq3udBHB)mTzgKqA#2f<=$6Y+$+ zm{MM;F-k%t>e`KY3LmP-!^wNVFZ8rt@3E`(EDYep-9C~Vx-mR5h;!D>fHJb;lqI7_ z3XqGQzKFqu9X%CJ-NEYK-TvPx@et119V6MR#`dsbuo_|g-$Q;pB>)TYDPpeI>g%c; zp2O$h@MNX$7HT{wM+TqCR5}iE6<_0%WuU&W7Fo+ycA+r8^APIvSXf5bBo54XQk)rV z2_Ey$EzcAfI~JY`4ZP&#(f&o#e8qZ0JQM-tFf?k*aTbfNGw+oRxZ97)xUehz(Uy2X z=J^N~bz@XNT#xqmfK7PK{^}UU6mkKg#DE_{!t`!`aSppnnQ^NsE~)?)d71IO$Ip! zWX<(Oq{iytgU<&=)>b5uUE{vZAz_mDr#o70mHqgyOzImc*5z-kjWQWE!GH1sbVaRXKeF}5#REfVeMj=5tyl)F z_ARMjI=sng(_rvhzHI|0a64=Idr~e`3>#+AwV)^My^Q*v%yhU@PsC8Wu#O|R1*YaeZy`!afrUUFI_&K|HUH@he@=5&H zO`?FRvQ%=|wF3Mzb*#fx`k-Wy?b+fbN|fJfH)Y&Sw1C|zU9y^Q2>9NMt zwq@E@WdBc6Wdu0pxZuk!G~PT>7Wteo*SF3I@&i!{DV5U?Mz!brFB-N)xTK0*F2iaU zziG;gdzzwK zt_-n)=726-!?UrU@8kM=BX-<}Mt~3!-!R@s+nI;WZ_7QPOrLX-xouWnO+cSh72+q? 
z*+`ioixtJ81`5(scyMF1?UcpbJeX(w9*BgI2Gg2h5KOr(dgF9HgE1V}`aJ)})!tv%X`S3CH1 z29{kUJn4-Yzs;g*E)&cBVX@hEyflxBk{UpJYgjYm3U54^_#lT2-zx3F3`*W@7TZ1G zW!MW~&dyPGpBjEk2|V4Xw0O)n-mX{4#$4Jnm;k)krQueYM(R0^+G*I#NC5v7F9Xy) zDsGBQ!O|)DsDicDVR|maoc;=h)2wph2sNlpl!6Eb^}j8?$7SORac`%5GZPIs@l9VG z@E(+u(LiOoUli?7@Qo#A+GpcJ#|~{H7FGtSI=@3X^4X_|ajpfN3q0)A@2)%S|0sq4 z1gU#)CqOto0gL(nb&Bx6QqEoU1~iNIBbEv>oB+}Bl3h*_K~wcJEO_ne3vrUX^A8ox zU^Hth^t|h8X%x?d!b`J69&NJnkG1FdEHgRAyKY7AX`cI_DS&gH)4Mc^Oxe^}wxbD(6JY_CTvO?*kUI&7QHn_v4fwW{sZLuFox5r}ATR0FS2>c8ytal&b-*l}9;V7t)zRe#q`;OCr6sPjJY|;&=~TJ8;z2_dsS{Ze z7B}aRur^ciT1OZf@@=!vV?@u>)8Gl;53S*;vm-6dwXl!v4NX_2sO){-{-qR|enFFM z_|3T~rF!Ci?6DjMIbQA!^ZaA6Pe1>~bC|0R?6Hk0RC^9+)H_P{DH!iE)1F}50?qgp z@JW?~QF^pNF@^_BJMOch` zUMg0>@rHfmO!>Zz+oIy({yTX$`>0e+=b}oE!NKWhC$piTGUKn#K{{H`(m%5$PAqmc zj|OaboPWP7E`%6YeHI=ipFZo?-&obrl5Q+-;ZqQIHq=8#TcxoC!3AHT+LKADwi>+C zo0ehF>&c!Yaw8$rMSZ+Tec08z`HP&(G8my+TGSp48_IofUDOOzCqB2s-6Q*fp-@1m zc{uW*1Pr&CPa3n+WGUH=YIc5dr#?Cw#CQBt&hsDPy{8MaxgFD2lMgZ$<)IYzDCdDA zgSKu9;?*dgtrVK6d6o;okxJqj;j-`G!H!&1EJX!oah881x7`SE;~~Z@^q+T zoqb$Aw)}V$;f_7*3RDD9lEQc>H9q8il>TJN_^QZ5WzUT0LN#Jwp9+17G&NUk(@24R zT82>o#9>a@HVyG`G~jh27(T@CpE_jt%4X}0VvhZwz#P%!Sz%a4S`_;Yd}p@fh@X;s z{bkzFen*_zm?TYsYHRSKVwOo!;uV$*ugd?Uu>NxI$6o4Wv5%*Mdhupt`% zM#lPpd0PWXkgaq2+$iO&$A!zyP(w_U9!DD|;oZMklaH=k?||CfaVg za)<(h9I#I}n%Pau;Q#GY_rXvIS)bo*Aqc>HafBSA3;E?D#Qob}u_tY?Eic3ZR)<0X z8!g+97;&ElOza!SlYpxB%idf-O9nUtJmx)PkRb9hnYMW6JS@+F-4zS!8QNNrst zoUr=1qJ$vQl-xH@*I{LDEDC7f?A@`~`DV^Fex^0#!8+dma{qAW0AgC;(b)g&pLj*M z_RZkw!X18z_nfza>K|apND`|g)BfqkH9_P-ag+#E7*&wh^1B`=x{wPDu!Y{UhqwL| zv>&5=x5)6PE=pz}00Khi4-8{)IL7rQjT{afZ@m9^vqXU;82os%P=dN%+NZwBfgLdF z&Wuy7l&osO7S3OPr&3T{3z@_cAu?Ti8&OaNty&wfDpmIS<+PX_fV|P5^qkfm=rPJq zpoB3zmkv$cui3do;fy-43_Cb_)SRyMxnHf6?Y!x^$P&$cabHNgWfz1Z%2UMcQEnZJ zbQQjd3*{*2`*~UAwGi&C)hymKC$;VoaGB}Jw*o=O`3QZtXh)#gHnODy%&-B|3d-ZY zHWF?c&?pE8Iq?XJ<8(54&wLnh=qt()U_ny6KqM2%KFm8|m+#{Lxdh~>5t5XV?q=HW zy7Sn-eh-p`i-w#wNuIX$%DMy2>NPrdM>H?PF04A~149){8rXc`lCm-;r#x(~2-%z3 
zi{7fKbv`(8%%VsxW`r%?K@$58vX(<_4;Y>H#8TQGOmtN2DYT_Ud&BU3mYTl!+^x^M zA{4&wH2Bw6k(juLU*CFFuUFN2`CIvd(r!Z3tw6p)htRe1g*%3nO%%7l{Z3sEqv9L& zRm_QPXOUv3vq{K_7{{cK{GP2Y?`RkPphYS9?mOgToH6ZReNq@X|AwwEw9w5|KNpCW z4o>}_qDd&8sRb3=WtR`37l`yr8L3f1EaNCC9VRemLXVtwm0zHk4}S2&diCU^Xip)U zDO&o4_D`cohg+85q(v^P=9In2WTkxl>b;_TUHSB~_*&XBuJxMf1Qf>4jG2bYY5#ot z@{*UJY;IrN8|m`z667+UKnDt*yiiU9@o;S!wPDr8RI_`PfowFthPNdS+R+A5{C|Rc zRjV|RT`RVu0v!g%3!D|U-KyH()mNv--#e4BDSa=c$$g0xkr3`jL!ABB%Agxb7-Lz3 zwdV?Oxb0j?Ki%l$f2;`#+sl?xYVEC+%GLc8P+Z15dXi*8>)qJyDMM!{hq%Q4_J)J2 zCN#@E67NTXSIfJhox(R!Dpvc{+79}R^`NgEi#TJLrNPE|A-!kWTrRSV`83TQH^}#S z{QOFdJozW>@G(RsMl(JI8nxS#t2?;qa@y*w#~E{}zH*Oss?h81gc7N_Sl+K6Y>AJk zy4hD;S=j3GF!b4|NbvXpk=V#x9?Y*Y+v7H?=isQ@AEtLA5ncAmlS#w7l+@etOTP!_ zqH)!$c}Viz7`eC3)P8S^)~bFsbigvcml-A&HT1SMw%=+>(PhK1IN>LEfd2?0g}~_F z`yc)z%J+0~E>kM3=Kn}7vi*YlqF5kZSS6vMQ-!3#9!fuaJLRZs{#6>sx_rBQ^7Zp_ z{<3X}1!0I!v}G`d;f6rrSk+2}2opaKx&EYtMY9D*C-0nxQxTRP?Gd`>T{Plvz{>-v z=D-&8U7}!5kkO!Dj5+yc809r- zke)(QwIW}d9hYi>4%XPt6=%7Cm^x@tAfc~fiY|6_epJU%in;acy8sz?R-Q5Pwr79h z^u347VGSe%Jx=5cqgkPcpSFG0XZx~EnqG{!9%juYD(BVLeYf?&t45I48iZ(g?OC^? z7}sJEXxx(dw6gM9%MiJfq(V{gVW28XrjJ|9v3J6*3X3{@89W09VZt%Lc$3FX;7gM0 zvAUuA9E|oITk&qrsS7E}-(R)s-_%eSCcwf*o?6`HxgZca)E5H=BmWT2a;XNA;!8z? 
zZOuXp+oKq9FbE^KsvVqVT3;}yt^kty!>wDkdpRPT$NMLY|zHbthrS9(F}>2esK`Vd^`PyK-dO+6-^nWwZunJ zm>@Ky;P>j9u=Od!ZjwsWCe90IQ;0cAQ5jX=&Ku@fC76*n`b$mO4X$0f9Cg8Dz{cDv zo>8)X-z4RfRCBd7Vxlx807p5miY+Kgy}0;_(LH zUw`?qp*(&><9f|nWZTntS71*BQU`3yRxDQz)>wRlpKr;2lm5V-a{EhWf zT2}h5`9LJ;DV}&?U6e6keUv=>YwXi@H#-?XC`#M$o8gqZOr*1JmaBqDRX$7Ak^wvv z2k&+T>1p9_$`=&2Vw_RNkPn2^2zlFc6&IbLi9hF_m5SHva||1f*~W(kf`f#!9s@3d zGh1?Mw=(ofBs&-!|H}4SD3+evSM2rVr5U?-MCY?gxe#Rr!BO zY)evf^s?s?>j%fY3Qc__h@K1|EZiEtTa*e2;t4QL_XG=@oJ2CyxY*N+XoZ>@s$|}s z_G*|vCkTovLQ|JLomQdv^J$szM%nY0;amMH3aFNTO*C(&-R}u!E30ywQn`iQAXcHd zc|^pLfwGTl0*}o=jr!y0puYABUJnflGZ(87n_iSWKZhp{Bb#u^;E}0uN@fNbCjIw> z_qZ(PN?rrOj*KWjPoSbq_b`CYn^pT}f30mDX zjwj0labeR#nXXv154>{B7yH1pmH-{R^B&yVa21A;}ZV!==18|zjlfc zSK$dpMH;C@V`X_Y^P=LOBq&hebr=H^*(s)0vW~ z$u{$kb?j$%`MU;#0cgDqsKhM zsvRH`9?G<0;x@E!cG`U7MaF(?BD3zV2LbnPU_IGNF=;M4!lI}Egx3dj1%c2T3UkwI z+f?B7y35LXSIQyn7H}W#%(VH}3#L?vCNyxqM$%lAd!b{Gr=KZ7`|e8O{hAo~43&nl%*dU$ zgQ^2YgSr14Vu01mM)wrJ_3LNFd`Vz_!R#XKqe^*BTN9qo*#l?sCAN)zd8QBt{4*Wu z$-^SnO=}QCBh(F5`LJPF)3h&&m-=iy!HH&NM<(LO)0aqgzT!=%+a9(;B_rm5{R8r0 z`)WY_ZH<*;-ya*usK9~GzdKKu+8F<}M4?a8>K6qo>cS8Df@}tl08ypn(hEl0u#T;~ zU{HegA6WnUytbc$zaVyk)Oz@MsR&X%BRG0lg(=!*O82RdD7J~E1Vwx-?M5qc8&@Ei zL)qdss*5dGFm0mS=BSl&n6KQTce3+G(S$?XUE`VVcUA9n z7`K}^)MV6byJl;I7hLuDYOFLS6}DTZI24H<_A|a}YRJ3lxi0iGu1Pm;Q+bVCk&~AR z?an$SGP@4TiN47>p;!mhP_2M5j`F<%e%mW6(1!34J;9KN#dyPyt|HUpZ;~NJ@^R=x zr-lEXkAZET=6NMy8|@rumEaarbs`2JA7Q3_kt7|S(RdO^pOQA(V@G|%_XPPXcVZj# z@kEKJwmQ$}RVsMUk%5X(CFhEJHTY|0y}c@U!`8<1j?!WM0Q<^K#&!CaFMgmKjTKH1 z4#=22mC5pnpwm~fZN>5L>dV9wOHH&^8y}vZ03jZ^N{GPK=#qAqY%5NobcagslMD;W zmXx|hiIx(au(iT)c(dQvx`=e7sL-ngF|-#3zd*^f7ZvJs#(v~Z0YO%RBRm1qF9DUv zu7s)#PN~w(j}iXD-G3^6C9Vd%y@TujK0SZ_bX9&bbIt)%UqJ)UP3lrzUuGA&o6;f3 z?=ENZs&&LEnfLx0vFf|B;&qVzS7p(9t@W1D-AAdEcfQLkI?al?W;B<+d6fP}`+12x zm_98-YE4Vz3J<4cQnp6^;67C`|6e{AA+U5a6n=7N^B8F z|1`yVlhF{v<)h$<~an)c_;}@m2pXIN%Fh)@bLw{L% z5RZK3)#53#MtV9w-$Dca$O$u zEt-Q>Tds}iqQ5c4l9U~0d#VvuY>!~yp9&F6Xwwf<4Zg6MwDYyUS9?t&a&sb8y2s}{ 
z=BvwqQ*u-a)jo}7m?JZ%0zB~h6>t0NsHjmvn^v%h zJ7Jp}AW-Cqxe~|c7_=@Vz` z$?%WZRVo10EP~P+-GrPV3wztM}a=+*_C%WQ=*f2G-^>i0AOv!zb z2ecAD^VX=h)-u;#VP7C16n2yts!eBWw48}Fh`Yd$C8AqPC`d3%W=w!{f*-CPEctr_ zE;a96Vj|4f)L*5UQt7&k01BJY^>r%YN-WVjfx9Z5-JlcYHnz7O|Dy=4;6{q!0XFAV zsX-T2>MJ1}Fp}6K91yx*tQZvtU6?M=fs7{SF2A5&a-=0?GaGq%xf|vJRTgQ5%oCkX zw}5AMigaoiaLD1(r^yV(cjsZK-)FkKVhFn%B3JkR7(Q?r4)mN4snI}-! zhFD{EpW^)#FzGLKGUb@g?xYGFu5i}eTsFq@#0qE;Zyz*1tn;WrKJ8{w{$8sXmvMlL0$YMfN&C+LSd2gMqxjq4xxH#0(TFL8iz|pPb+k^#01NoCqyR+&dD{1Kg$-V{uYVv#|E-OdCl<@-4Qs!I^rLzL7~jR`5BEMxAL^Ne&*A1E2dd zOZTIrdHwvIh_bmt4=e1~uBR@-kuS^Cvm6aGvDC`_jK}8#WsWh9*QH-}5VrAACP4>q z*0V*G`(OBebf_sbLu|gDva+uBXafoxwpQF~F@Vbg+`?Yur()`fv)78I*w1cg9mF*+ zPoTbC9at?3<{UQhYU%BnZp&w^dWS*XNqvPcYIV&Tk6ax~a@w#;_xzaHKAti9O&Kx% zEH6be#2rs}R&QKTHb~+7Edu)pL;odGH6lyV!X)Q1X_rlFU{57|=-4=#fPzZWC=z#x zvz&ZXFe?S+b#3t0mt167Yw_pgD@J?GK}HEXxIdvNYWKhNFPjB2}A zgzqo=DTD-EB{mRUta}}f7qIARlWLcjx$mVHXdbm?7fucXS5BVV%_G{CpY9)T!rVdM zcU8o_&w3EDGAab}eU=J8p4Ldkea=~_Qu!fKt&)FBT5+D{OQv%EJ3S;f-q=#;B2`z)%uwlt_9m+mO*}*nO!Co+k_L@;+nX6rE3f+(hKYmmfHkx_amEaD|3l6V|DE z01n4q9;+l6AG=<2&1djpgG&!HuU>d)Q}vS7X3c*gpBr7y#|7Z!HVXx{4)(o3e-*%$fg zS%xT!bgp@lOE{~zf^H_Ko(-ip(I1^|*53$Xn?8Haw^UQS3N=Cg>p$8Gn2xG0U$4H) z5;F&8`F+_KvzYb!OW|VIFbg-0w6e>584NZ6NZD*E42O7pk-uVmfrm(@$7El zR)%iDuHoWXc<3|VJ2_qF_N>glN>EyQhr;<(BpJT_}C&ET~9K+BhIDIz9T! zhjx}^V^sYOLp-;Ri-l-eNXj**piQEE82SbfVZ*&la>y5VCC594!$tZVLhQQG|M7Yw z{KpCaUb02>^r+h1e%B={XChyKNPXuDtM<^-;3RhL?&`KnmgM6w5@-@l#lk)unnRpX z?G*sBQ#XwEKPNBzreQTiCCrk{TtD5r<)oBlX>aq|Xv$eR)AH$?mxb+lcvfPH=p`m` zEqVPblG1nqgQQ_B3Q9gl@rYYbqm^ zL1W2s8^v3)aW1`$DAH4|`x=J}ZAU~t#qHW}B>rl&MpBH9Fs>;$PmeOOcCZO8FB|3r zf3O-76^>y5Uj3TRCT6-3cz>pc+GtT&70#N1F+XN7-lOt5$D+sXK>i&INYi!>IA^Xo zmvxs(*|kwr^%0Z^oA!Z%=O9gN#-K8S-}rZdEnj1(z6}yB9PC6kP?uG;t>cg%zSJ-i zC+**ngir%U1s`0|HgkxSepDss#j*Tm^`8l1`Np%Mp?8N6P8O5cf$2NDC+_I_(Zs4s zw%gk)+(j>t^x{~?4*2YwAN3fEq^?L9r-zia>`%ux( z1$v5k^nbfaa9%! 
z;TClq^9Ild>hj;r!u)0@H_-3X&S-7EaBsjQBe0t}8PkBBo)^hk1Niu}g9Vcq*OLaT zu}Dm3>{F%Qt-O{{z&gYGw4dIFn)mVpTgO`&pJ$s$s z$1snQRc4)SCZyqjbg+H+4kXlL3inMg3@<)rr?G+p)0f!Q6fQ401@(_w{4ziESt%UX z1RotC$JKCpZ;nfIy4L348a~}4af0+Xaw8QD{}>MaGmh0^Bth=^F4uxvM$p-L*2BiP zf=_)#=!YTI=;(HxZY%!plW5L~ZvQhCGs6D*FUPG@6J z51iSAkI279eum7wf@&>1X}kUZV8yHUOqK{Mk4pe5RJ_E0x&b;0HUuizM z~3cilWHC~HF%iQPL-LFZ2V$+zz zxFepCZv8zs#QXM$n#ZVnMZM%T%8N+x>UyJ@^2Xx#jAa;~{V1^j*L(ok1t`{9gbTs$ zJitD;62D&qILvUg4ybEOwwpdh+0flGxGSQzVyy z8G^#Orn)`FEg+iCp(S6WVL}$+))=AZDwAm!uwN6=e1tIgOEuht2qy1B4Yy-8>BCMr z_?<*GX5N=#t?EGV&EPc2NRPxA$3;2GjTRHC>+mCb?N0-pdCT&GMu~;uQksWak*ge> zpp7jTV0jh`J?f)*u-QH&1hvrlk7???1SiN4F7`}n%xz-SLB6>( zkfv^#{#1$^n;MQqKf~AoJ_z+FtjcLGOgyF(o*FDfws>*EAK)}-LYOHm`JQ-%7BT~A z7gGifkAkZv^3x{H0-v_qxkS^Ke5v_OGAHDZDio7>5BCm#3-ZEEyVvWzy=1{;97m!Uk+ zlb%_ccSJcjyHodT&OFD;4$OaM2N)9pz6#`vu4)IRZIgb_U-2$lA9o)a)9(>@T30iK zz1o_qF}{;c+GuzNaPrS)rBvp*XUC#Z(pK^$V(a~ywpf~4w*D^|cme0d`WXR!DbsOllhq3Z{i-fS<&mLJZ=r`Gsd z#NiK^-Ufu3M;*+h4@p)$rX1stF`W`+@61auFrc5;-^i8qiu1XjpsZsae(O}DVfbqy zg`GjqLMJ@aVi>G@u02J1vVRWlYW$e}kt(K-5wsVvCi1Vw=im_FYTo2~01N#kF!wKR z(kc>){nflgFn^@pf2s0i(#wOv+CXFLpnRJfrxM;N_}E@i&F)N@r(>bi!5i!OG-yZk zWQLuiRe5ePG>G0@Xl5x%3nf0EljZp3l!L#qO_AEyxer#V?N&;6+qW+xA5xKKy&d^W z=TA`LgA4kxK($R3`=fWYFVvd|j6!CA@0RUkFy5t^Er4V0P+LnQMm)sg2M44Uy4k&! 
zeuO>I`w=+ZVyBh=Ipu;CnmbZe4J`yi9@-G3C>P zgL%a*m1Dmrynk%HK(pte1RVLT%=29!vC$ypCxN(W>ztDd>CmS-p18CofILosvi9b9DGpTSq9=Sjz#BYEpau-SWRqe^j2DN3gqQFEfH@@U&2b#f98ef=0U zzvsOV|7^KdF1J-XG}_G)87l+*_e9n2_wHY(vw|kFz?8)AZ{ZuQ&$1^3RIeGszCXz~ z1dFmpjV~f5i_r*u(Ce(S1>HeJe7y$jz6gQ*=IL-huB(ULG-PxT$1}-?hqAhV@)(Jr zhD$Ex5xr#kl~mIzDO=wep&|B%Wy@PzcD99m+ozwsi3AE7Y_T!y-W%C$XSk)(UNbJ6 zc}SlZs<8DEq`Lq#@c^`;EDGR;wMp>Ve76#xjL95G7?MC{u(ig zF8hjUFg{0XW|J(u4o^@mYpuqOiq=uxam>cmWX_^|&ZfSxP?rDFt4R?eBMcNcF-S@# zpKj*%CH!f=fyH`N?jNa~A|b(uCaw{d=m%jQI6pyZOBAZ2AQ9ugltBkz!ibZymZkk- z!@BJaBAK+yn$HL60yspE{Io>^MHg;Ls>n$TeOM`EU33FnM&A$zIFhGpVkx5*@9J#v z_ei?=`)jCdxS|ck*rwKP7jh(p8qA~K3W^r*QZy*|@dsb^vLUe|hA~qxX5u(2%-BrQ zIi-hRb(R|`&R;Qgp$zklSXQWU#8}XMB*M*PMk%69-PGUn)aR_QVqV507zQFXc#N3j zioDLalZH!?h2d7I{euHFxZ8^RPMzz2g z8p^twPo>838ul5%7Oh_^F#`P`V{hRXWf-lC!Z5(l-Hmi8A<`YvASfL}qjZDhfV2pR zARs9SB1lTt(A_bFAl;30-4ERRo_)^V=l^TwsHsxzc5H;?wd zPZs$YcC?tjEf-L$+(REWD?U14M~No4r3`J74#U*-rPam8F;wztmm7N*^eTG5<900Z zD*O6RsgfQ3;Ui7=;!+w7xHBaB_P5gmVAVbv&VsSrUF*Fi=ea~F-;i9LI=&0C*gGD9 z0&=M^LK0S{Cc8APTN{%-edd6dtsF_)@==SWo|pa$Y>5R)8b&hl^VInjNUKmzzbkrB_^E?1cjxievW$G+n`|>nmqoJf(1J3$XWSF z{U=>7UUhh&34~B+rX33+MpT+Q|FsLLXeozZDk!B^W=#0`%<2l|J5Dojod|RJ6xHBy ze#TDyyxy=RQ5;Q3X6N9bg^8Q;MW)+N^u*+8NTC*1(}&qov=?megC-EM$0BC&?7x^` z7Z^`p9?;v{4k`Bna?1}c5)m!oPgq)?_fe_BlvH!&jSq4Nv>7+2?-%+zOO=q$!4JN! 
zlgtxK=No;g`RJZc;E&9P%_5sL=!ctFXhS$6ob3`@;Jo zQ@u}jK$C2jpOch!zxWUqT5euDHtd4bmyJ3y;Fk7h;Nig#mNe+{I7F5Jq%&4{yzN&{ zRJgN&O=RnoG}yaX%1tGY8}B)7+tWcQ2{IlS)!#v1$>BOWoUv*nfhaI`cbVP0|%0*Sbvl>8wbz8sS3<~Pfjkies$8cGJ+staqbkElNqUWklZ6N*b zW1jn}QQIblPAqS96O4M8p`$uP@#MHHIs@|N<=R$ZK|W)ws5a$Od+FHbHFM8&b5j|> zvIP{NuxHrctO>vE3#{<4Cw}-k0=HCc+l>iBNumfV|A?vB=z~$Hp0WY{+uEL=tte%w ztV+mZU!6TpzyZuTSloIbnB0+*s+tihREGx?s5M!U(-rW7$}0|`_FS)*AE(Ra?y6Jf zl=a6-`Bu2=T#lTx>0VQNp$4C8fI`<+@&ggkfD$kGdlTaO^?A^mx1$!2RuYdr`SFf` z=Nipbi-U_Vbn+38uE+l9Q^&^>xm_#A8^g#@wz znR-R-WZDezmvG)jNG5!4)Bz`_@hhbh0bmDjebVKDLAN5D#`--0Yx5Lt-(cq`u=)?* zjRPYV{*vZ@l7!lce-%=bd2i1>e(7jYr;y0=&~+K{PCjq=Em)OhShGE(G+UgAJ@&n= zIw8+yQDV1c(y=F}ijIhlQv)djbixS0%EBVyAP>^gZhaTz@MnEORc0W%AH7T~dFbPJ zC~Aw24R*v&bk52$E|K~oq5ka+9v@G>%yH$#X`7XLok(mC#$_kNzEV@bXmBUH+t!QI zKVepdOq~K)yT6@kd)H?^0zIKeg7L#nzp#0&P9OK%-Tc`u!L>OmN5sJ^1uk{5movY) ztH)J|3SX^zFv3ogB8&c7BwiJW=*-2U^$vkMFG-rWg+sgQ0fIFr0x5+>|B|CP4O9f0 zP!S$DQeU=!RhxXD5@6t}*Cs-eRkN}qgbih#bhOqe2+{yL0f-ku;lYt~vhi)?PuwBo zr(Px;NPP`TZlOWrpLiIc^Os7}37W@@3GCR_LpD9Hc;w8O1<=}m)MXFq;TIT(J!Scp zTPl{E^8R(rO^fA$VOX|mZZH6PFjt?2ah{up_chy6I+;HwC`_&Tqsa5{lPW60h|L-YlD2aj_W0$T-DiVqKItl1jdmiO=@F=Y z?lVcwOF{OJkY^o53IwcqjXK#^@lK?z* zsU7BJ<@sa8iF$0SE6rJ`BcdB_^nwLC;l)XVs-vU7=0R+$`ZhUyA$yplw+)_a{W);D zBLN?~x{vXa&zjsvYpYLhDKkO3znb;y)Q?|2%VDUEWq4G;)lWm}ZwR90=gIL=7d-Cs z*qe95G?7ID-GN>Adr!AmZaB^!-FB5KIm;D6K2b3gF4)kdSLFOtV)d*>7e;{H%0N(5 zk^2L?@x0zuBgq~2PNkiA3VGGM(ht%h1IKiOup(SoN(r^Y*%nWWk}#+$N2p*;+G&(< zmH}S_zNHZ1r7jQY2kyW-(R82iDoihl+F~Vm)z$GE%uj3DdhBe&$!AdcHqCtTPi8M< z3q0?7IdNdB%Iw-AzF+RecZcJw(u|F$z=-ndB@kJPj1Y7qO3-1PHyf#DzW+(-ZguN_ z@)G}u=Pw9==?^rdE;7V@`8RccW*$j8s%bqkR}3P!gcA!3%{aC>BgOGC49|wGn)intPc@`E^`QDf$2=48LCTNIfdp7A6JV!1%f*~Fz!3_OSC^T431&+$7W^+ zqvOc>DoWFb1x>`@FDG%}moh^I?z}{*9M>oI{9KKb;C^E`B*bhKohg8Y<=rYGs)AS&zS`#n~6bh%O;4p7oo1gc=&}pJG{S2iCP1D$>7eV)p$OIbM2y2Qa+f zx0J34r{C@`d*KeTd*dKOu8C^uO~;GB{EhzeSB=N_E4%Hsr?Dn{N-zZgBFlrJuGPUbP$2sdwytDSbQCt8u9B~bq2nKLNOjv&|I7_tb|}ND6FFJo$%M< 
zEv~R2T;v2_X;xfy`eAjrzTk!@_()v?8{!E$1d|s*kuSY4@es1BD;%MU+^&EnCacU} zq)$~VQ@&r4%@}oP&?j9HUfIX8bm$B$a@;6m3J}}lAPv^(+oEwUc9H@1r}sQQR0|>+ zOOrw*qjH%@K#^pXi=?5qCVC+#?9Z5ZB(H}swG=ad9u>17D{K@#5s_+opLnYszO}fL zvRAJ)!<+Tm@Id|a>#2S_iJYbfuSVH?yXTCX!5Q%@@(@$TQ?nhn{KMxlt z03huA)&B5Os$VKA9e&*$acWTXE{GU*u^6Z`#j(SWIZwj^&@i$&EL3l>T!}l|)HPT2^7R@f2>+aw z$A?qiQSi0?PZT7UcmfwjO}iusM;u|cG%|-XC^3qAA|23sJkmcz}Jf^e8(A&@m}1ed+?{W(nP5zHbi z-1I4_IRzMufgS7Lr;y&j?Wmsvw5LZ&sE-nC4l6!4bCe3-o$e7{yQmx7u4uC|38}75 z{2#QUUP)CqEP_)b{!hs)a_@Um;5uQzcP`OSY|i+dP{k4kriA^`}$76p;mzTn&L zf>dDZTK>fT5A1%RtLOpi zG81w-@~syi#rnGE0}4@C=A*fQvPOJPhAf3F|3P$}t#>p$L!4$6fqh+yKO zh|R)&>jn~T4r)J`85*u2Pw-QFi)|JeX5Li(ZBo- zHj8+%KYYSKPM@w3;l-F@Ze?F3#A7lRr!h1NmVJ?6L1TspyO*fnr8IJT%vt>KJN36W z&BeO+y6sNjoqn*6YFw#@jm!=jst~^@Rv+!5w;y7L)6-l_yJQ`BXNR2atVnKNL=D=- zK4!C(sj0^L?`*_R^mN-_c?c`$TP6jBHQw?Vul5cJ8NGu$f<5ca>}as394Px%=RDKo z0F{_x6MRWU>A0CQNZE$=>f-w5>@^@_M4;Yd3&K*BciO$ow2F0aXXzAcvn zz#=2x9>l_zp+#rE2QH{r9CIG@IvPF)6*SrZ(P0izB(SAEDzg$%>bkB_d509@{uD%v zj+sv;@(z#nPAOK>!$hc3N}9C0(KzV88K>AHbp`9$l0M#E#)`lEMMoMdCG!C+ON`)C zhJ&}U8(ciEm48+s->U|5>AKt|AX6?&B1jQ{Ox>eM=%yn`k42EO>J*IdG7XQ`6bXd! zQhoa;Q)OR_`;@^?-jo*-aD~C8fpJJGO=yxLtL0zrN+Y)Im zixUo)EyJ@7gmSvMZkWRc+ShnqE_Ic2xsDBEC!#Zv85k>{i=gQK%@#>bZJ*?K?THL*Y7WJc>{y#raa99SDo2G!*e1XkPy{iC{B?T7 zcV3x<8&z0sRlFgk<5{j+ABZ->a5o0bAB`~n0!rXapef!NKBM8=<0-_XkO%fchF)mQ z#s_<4*WkivAae6|36-idFSg@?lq}^MFo-ERIEh8paSw~ame^L>P`Sq*ZN-eUKM6C*Lg9A3X2XRq$_$Cn*|C}+4zwu7J zX(#2nAc;T{e}j?7jm(|IoEJO}>Bqj?Hi<7SkinJFO{U;MmvYj;6uNVLx=CWXmfn1kZ619w;FN zffI-dH?iAnM1g7^C8!Al*6GN>#ou#9Pe!Ks6~IFbpb;4g1_l?Ud2*d! 
zS?EfSNx^UbAsE?CJHa1LnGa;8rFqf!pwP*+>~)xB3z z%vFCgt9y09*f_i*kI3nE^HOAL?8Q^cpeYh!)nZl8nsV7cUfnfqMI4sIDc?ki%O&Hh z$?LH?JRn>eS*vIx`WB-f3|P@s86PdFLEEymgxF{HT6~d)1pzNzGc=JU-b2E~Fzw!2 zrAevmYfUDz`jY^CUy=Fo2#I-aw)o<&J)#Rn;c)Ex0Z5Y1thGwLG(aL_SNrWbSZ^o} zQDBcj{DShj45YGBG4TF_<~vqUhWL8PK@&8@Sr!waGVAC?j5*6{S;LzCMXAG@LM$^IElf`s zk-?Ny9OeCBMfmglR~QKm5#IQvZ^~*vJfE^rQ)mC|P3hyfbTUVn9hKTJx`qiZf;@ZZwdJ2BF+VXZB3 z69&H=>L@f4M+$h0L36vwMu{V}gNeza%OYwtoxLw=`P)oKo{!`G7z*aGY4syiI@d!| zFewYZ&u6vC@-3O{5U@`C$RUfS?RN!J`d~tV3=m~V_QhEnv{*ZF}fZ%%Y*u^ zHEaHV{C>uZ?cqrm!XC*oQn>G;?2a<_|8VfAV$)(&GD*2*Zb*@`)PZ_Kh2=!B3@he; z{rAJdT$fwz5C7i8ki8NP?w(suU+{SN_xNc^fhx3Sz!-;bMR!k+ez)9tooJ7HOtoeT z;PWv2J9vUn#E$I$esD>xhl9t#`>!SU0lBgN=%s>>pYkm>#>mJyVx>gZuuF;QuH67> zKNPQwlQ-EYWyjg-qG=m^Ei`TRHEHNO&x@frt3Wm<-c#xrFNRWDSkje@O$^DM%yX>& zdh|GvCztk}4@b|q`hHrBfuF81rOV3V12|0wGfkgR)y?QE{~dj}X~^*Mc3Z@cWvKrD z$3Op{j}BQYK{0Ak;<;{j#r$oV;FaDH;+Hs3D8-Z;N( z6tu8{_#>9mS)&3P3jc%Uxwn6DFvR}8*yTo1bjns1*#3l9elIV?wRV|J z$6s;xgWPC9^I+npy6dxrbmE&t$VUI}Vmrou_Rv3pH+>NJG>rcKyW@Gc#CF6Gi%&He z)_!rpksiI>r@E9`OU+owDxl~3KnQBLl7@)}G<5wbNh|ekQl5WCsIvVe|UWt74dCN_6 z^uKAWe$##*`foGkZ+sl8wMeoK>$2AJ8K+y;!DXS#2m zE6`ngSJMJ9Ej<61zI{w52-ftY%!j|9B`o}(a>M5OOXJU*7vbn;CQ|gD?`H|!(Ygw2 zHj*Nz*PdjUROSMn!V}oy9d~1>9v*Pbwzb{Wy2EZCs1zK3+Lq}{>A{6 zfu8U26nuby2{Ewo?zTJIHP=Uh+cpcBJ3RawQCs9%ky*Iz3Mmf2ZCc-kE^My4*QEi$ zf28)}tbh0I>Q$2;oF?xjM^39;BPjCv0vJGNr$8zNMFy!?$0igpI3H^x2LwN;z=9y! 
zTJJI2_+YfeRmxDb3^{CFN4ZgSsC{#JRLN7~31$E|EmeYj`}3JPP@}wthYd-hclLVj zOaqGirvq^i*k~nW0~^o*yoDFRWcX~#!1>uy$!38W)DP#+kz0z^P2ggargwSQq`ba# z$(L|$^Emu`EAso8O4pcrN%G(|PuzX_!c6E!K4&q=YLg8Y1;~}S;0u-JHw$r1Hz^Kz zke3dG8j!+rvLl}ulxy66Pj6~ueq=|lnJRT@JZS|d^@u!1yJK+($yrl|7S?_g;kta& zS-BrEOX$6uHyC`eV4NAU8NGh|b>TU^zgPg}L7#7aKl`doETdjJtFzZ501zgau(|A- zV&Ggd?XJbb1QV7rzGm{bK)l+T zg}4sWaKDBGuC+*yWrjbmRNoE1x*QC=K6yM_9KBre=aqX*QJs{^dJJ>e@49$|9FOUNZZEqYAHu6|*CZ;!3ix#W>+%fm zrBr0)C;R!QPeZ(EX)sN}Pi;2)Ea91HS{Z&~P+t-dvKJ9EJ&zLo5ohnZL4x!Y;6<&o zEBhc~{gkPrWZ+5g0~~WZ6ah#(5jpKGBIZ!Pn8Z}AJPQo7v#BUvS?X2x|M{hl;oBui zE3U4VWYdrYDUklb45vmjyG}un7e$7V1?2Qb3Bg`qTF5U$f7WhxcVF!gu zq@U=scvR-kR8$@<49oO5#o*rYumkhvN(e`R+Q%;!fJA~=e=K~ zmeT$vWjxPzR`@m7=4V3O3i(wn;J+$Hl&_)6I(^Xqq=%i~T;>*;&Wl0o$dPSByd(vcj`Q z->nF|ZC$1ecc)95bw1aR(k3P1f0uzC&z2r426dNn$c7H#A;+h@F5~@E@_Q`1aRSh6 ze1)v)+Q{wol<`ZUjG^dYI^+TUqkn3iWP>#MiQZXo#VA~g1Z4I{qlxr~40dKD*d(7V z0LB?}F7)zUkT7h~xBWLpFg}EZN9?+UDkS8Z>+FG!YkSAu4kIJ>_(0Nu)i}Ky))?j| zEHdg(evS>h6UPAaF;M^W@;+Nd+2i@91zg=fXd>30tTh(T9G4S+s?@^voNSbDH>n@Wg<-H;YloDfj9Vb541KV=bp{kwQ&0BO`SL$uiAOj$V+Yl;3~=0hoJIP)x- z*{vzo^qLHJG~6x+h4hyV-W9>`N=;uA9hodJGUBSk=r_vdTme$cHtpevJ0rc2q*u}! 
zhGjYyO`&KeStt+4f?IuBV5t3?XzZ2q11cj!Nv^$rcZpZ#l@Wr~S#Sd2Qo%pBDhzlf;NX1*BY5o%ez@f5b;PLzNE207o_*_`{ zokx!17Vz{O2A-ZObX2uP3vqpS-;2i}c&yL+9ZbD7X~v31gUj4$>)j)Ka;e6~&B{X( z%4mIdT)*y`d8m%D)0ek^JL{b43ptN2^0(?r{sTzwTo^4+Nf(M`iFf<+vB;nuV&j+; zd_`e05uI_sE$0Py8l;um(JKDbSa~p1!)eUT4PL!zT1^2P`NH5OQ9R5N-k`b_iV$`rr)`(A~f#$bzJX&&5BJX6g`CvHTKcA!& zTPxTbf0$ytf+#lEGGAIqo7z(HFgO{k7`AV4{fcdS=pIstw^4K5_-uCwhc<(-1`0KC|v||p9 z1WK5oX`m`{G`~1ZhzthgoWMJI7g2p}lzmTB36VqvJv>k)G6k|j!Z19#kt|RvWzJXi zvS*cEs}c3(PXR{gn8SuUH;sG_a)hyT%Sx|hIA`CPG3!EC4=z!qpF(IPWsuk%e|2!H zlyjAoRCrLU+W2-$WX3{vsHloMI8K1o86iEe&2p$c82W)kpeI@p308J%y~~F@dR3NO zGY<%yI+zfBIs2FP6uQ1TyPbr`!j_q>4;HA9Q1g82=ug{^cRd2Ygj?ky^)~b z$MWj{AX_6(SoyPO&H_jVZYt)UaWe2|(0k1^{}}3$Mh1m}o7VWZHQMA9tq8g)GwkNL zKtR;#LAr(uGCKa`6yr{~v(kEZJ-wGMa+US6!h7k8b$=)ka=|Cxj_)eImoB`*w$umd z;`#007K74a6(~-}^9(q;=9Q!gr6fW$Kl)Cb+)R*@t zV3zgX*H=<;W!&nwU6x&prC1;faA)55YlI%yQyLx_G?sSOEAmbD*+(g-v75=~c}3;*TB;&rSk#P@_pu6IK6<2QDd!eSW5K$pOrI z0OTyX)<0bXc%*TH4ozRqzxs_SYDR_eTTSKiMB(nZZ!{qTr~h8IcEN)cpKt5#MGlzV zfAis75I^n{;rv59uuXKcLV`Nu!m_b&?6VoG{>Ia7T#KiFzW1ze^@^RBE=(9sCN4tO zs45^k`iKVqw%~y4aPAbRzkLy|*vQB6o@UshPzEML+zF|K}%883fSF=l7q?KacVQ#asv#MYlWZzyWH%?JnVIzrk$fHK>`F6$8^noOhu83ey8^_rVHpjlevjT8V5gu$*M&>#6lUAl{um_%xzojhLuCgN5 zM?r@42OipflX^n$%mNkAr4_NsoG%f-#8#4%(aDVl^xo@M*xOrfp-NZb(5Nh zM+us>8DPj#q3U=B*Z?sK%f{VCB#+4wMD&H6d;VvJ_Tu*co&Xrg;<@NCd~S(`>I(?4 z)VlSi>yldDWG%#ra$|^S%I#~pdiaNl^nE_ed2px5m&(2<1w^>Tr8#hVaze$-D0Gzn zNF!~-a?@rzg)J%WlbrOx>V__~-5@wHLew%qSojOJ_suKuv!S4dPeE|$2g^)X?2U|! z3U~o2=lLKUF>E5mw6v1ZHiDvO0T)WwV;m#OB9Xh~j~fXC+ql}q1_UX?Tod{5B)B#x zD^&c1g6}KExiXlb^G#+gJ@k09;oLs<_XQc3U@--ADL2CLHm7NlKdHGbp1_U#-COp? 
z_te6b4Ix72GPM@U0(0nM-wxW)GaguMnUIDS{nW3=ojrbn>902wF5i+Cbab5@{*V~> ziT;tp(G)!v6+Z}`2!{*Cs2di~*OYT31EWLYaU7!}Z8+l>b^yTW{?pSx))FF1pKPQTUc@!y^ zwUfz#o-qX8d-5yw7Pp3;@_F7rC(M%t%E2aAR29&Vu}86+DDy0@0Sg)cMqAT2i>8aK za7}b0xRU@%Ke!d4vo5_N^ddUC&aS2IEIgBwMaDMk2_+;FKC|MgVOI6P`ndUylBalA z5B3d@>C(~k`G5ogcr2SAcMt0UC>yn%@&G^Ldh(SdESWjNdWly(a^{(thk9 z_GsnH;af*jLK5h~vhUGy2P+d&MP122fqU*>L z3y;-Lp6^M1ewZ1$BiD#E4xn&I6fci`J+qejg6ep`%#H=s*B8Uv-C4B);d6D#E2&;T z<4~a$6EvoX-94DVdwpRj_a*}a0LDb`YWjYfXXSwY)$059=Am%qCHfGh6N8B7l7f1M4I=GlQG>AvOH;ReXrzVo-Z1g}@s|x%7z*ga zu25CJuqrAR!+2)z>oHh-+g5jA&@GUceQp<5iW-*9@~xGznKTu<_4F~QWEWyMY*p{& zq8EjbX&lrqkzJ^)HN#R({&oG!*e(WxRE{1hlQr4s=4nUREHAgK*r8GWO3xhzKs_Nglsie4SbkkLOz70s0x+>iwm@3sVn=j`>70p zf>`u_kN>wkJ%(5K-suVLsQSi=@BO&bQDJqygf;5Wr9^Qc#-^GpsG z8CSKtO9WG;Pjxq+O^HxKM<17pKgDB0MSjEoatdY_x8#8CdcYmuhG#qc{5R*PU+3DKRiD)n+U{p{D;rYk+sK7)&vZT? zh=M!IRy;F6y+a({+6y26%4Pt6V{Ha=5J1<*ZWSLML-?%I`aJ**D2F+HLf)wRjM93# z72IHBuOLZq_1@vQF_N0CkpcM_IfLMLoMgNeM!#zLg^9j0xi1S;0)szzNC#y;o!6;e zkluciuuM?J^0yoRPUt-GpD&O@AJ@zR;wJMa~sbj%X)Tj3xTtpi>C%1BE zoH3uR5hm}{FM7)2R!N3(ey3=a#soEji5+1Sc#S@+h|faOF3D-weV(kK=OI5Dc-G@Q z0N#py{J?6E_a6K%pOv;My8EBoT+dgynXD5=><8#}cnhUz6(2qQxXmBHR{(+GV2}r( z2(wo*_%UW;s1C-SomNaNAm&Hv{)SX&JGz04+bBxCk3I2C!=>QAsy`361Sfccc5iU> zrX6RUtbk=GgaH%?O~>}+a(kTCVs@>|ql)i#ss^gro%Q(RV@71Na&fzN}p3J=>r8)wz8H0cPe&i)l-#YkYP@JAu(_exQ z6EF~$$@Lv+QcFrBWut3g{4`dc$=-J3vOU zC3(n>D*bAGZ>GH09%wr7!@I$b-)Ni8=UyLGLbmD&;!^MC77}!mkj_Hw?S7D3ylWEq zhO*K}9%zCA_@_H7TM3-HARF`Al9KtSB`aDYx`;(L&%`T(@E%-sS6~0)J>@GSwwp>j zt@d!ujhb}=G@48q@FcD}{rJ@Y7W zn|vZm?{#4CBR|KHm!gU#9SyCD5&zi~cW4!(hMpCOdzrFUA%$caCQNTNqqbQ2TW7 zUE}N*Z#}?h7~>b)5c~tCsYTbtZ7p^WE$f?ht=&IxuDxTS_9GXO_u$A`hR=7+;pIRz zcab_hw9Z8sYw?ZLVL;pm4NdHUqM+5gkUMWr&F9CuzYnvv=%%?K9yvFR#OcZ zo##6woT(O>#6{ga9mh;IZM*D;=uwmk8J{S5_J3LNMJ#&e2X)^vL8oez3NlZzpQz7N z%sD1w6_A6fP1&GxGgLngn>pXeb@sb1hZ zgXOyO!R!eS-Q&h3o$TRAhT&P${S(@Pn*h3@SEtZW7Vse0mG$?JOW_wQ+ug^6ER~Fl zn%gNsO0psH8ALeEUpxSCd(K+VQi;eN&6gD1Aa}t!nfYcS>eO)#O!$)dL 
zFHrc;M`V7aEpjoSB12RO4{i<0jRW~0vz&xLcXLr9+AMrt2|Ac$g!J~gBX7+8LTLF zpY}9P1Kv;w0hT3GaPf6Tu^$Eh9X}^xsBUc&-<|oOA~KRpymjyz^tcIuLY4{?Mbb-L z$D9(@vwM_(9Z_X`telDQ^i<`|^mOy$cKh+ywr@Dgj|60)H6f8yHb)XW4%hAD`loTB z)gZ`*YHLUW60Rw;(6?)3#*+mC>oq&Vb0QG%polw3yEvY(AmE=ua;YUv1|KV`J$ag8#WAqpjl!ea-prM$g2Qny5kFyBuNI~WoD0y>ZRtI z{JYq#{p(zhrDYXRmbd?TP&r{iQoejJdT{)P;$3tL*jgmH5`h=Lfd03TS(1`XlE~X3 zraO3@n06n~vCJQX72%mgu@}++O*~A_H;la7QD$tpe=#g$^mUEd-y092kgBw_^v{4n zYqf>#GryE{gQXDdom`Jwa1!W1yvM^P^11B0VU4;`NCoC?^)y45uh=%v{njWhig6Qc zSmRYQ?8L9$`<>mlH32Q3B-7LD2FQ)x&+xAurA(BQ@gcPDlU!M91EM0XbN4KlZ7d(l zK!bwDBJ#R%+pFi6y!=`Q%W4^3+c+|j>p$C|h0)!FZsPkfU!H9ct<+8L0LIYHc}+?_>b%I`i%z>*_Vi@t`r z@O`-O6p%RAPY$~HtB(2jeX8v%i0WYJ*eS=~XJ?z^H)i_rp4eA{f7n2|aX81MzpzNZ zGMQJV|2$JBpf7pWApC2)*#O6IcyHR*O3xD0AL8v*VJw+NvukaxgC>{PE(S|B+xWpp zwysts7M2Jr%r5S?;x!=Y>qIH1X1U4cwN(8$4@>vFJyny_ zL89bMSHoWgA2@l#v!j7&#;=S`XQX5XBUFsv!{IaejiFs2lE=`b^7vyM&XJ-72 zYh|veT|0;WTVq+)ofI_BMcZ`&$1n+U&zqab#x=+wS{P6HIiSxyuqM){41A_CyFY6s?{q!`$`<=nZ zF(Rs-&gS=Tf25hV`Lt*E1TrD#Pipk4hrOMXIC$j!le+}0hXl(6zZtkFoXw=re#}7) z-#Cc_MSZ0MHKr5l@6Xj2Y3C$(00;x0L(5(CXZM%LO#*K=h>f5?+odij807%EDe&O} z1hjxF%F{}+I8eI)Aq&W-M?4ola~4lsH+X)lz?}^mzP-)qC^}Q0E2K+E<-kA3-=I<7 zKY5A@ss%T&`sqoMVNIA~V0%rzUM^fLMO8;ew`^l9d1)JTypoi-A#a`1?pPzvccRMO z)}5fm&u)LFj>h}j@e5X^4Ja(;%cr-euTS=Jm5z6Twk{V&=)4P&bbhJLboifog!eXe z5&}qRm3FRVYX8dppwI6U%xLF}P5r}WC+?0^l;n;&mz_b@R`&p@ zfF^cRFpFVQMCe!vCZd4hH?`pRGQSY8yM&lYGqo%oMV ztaPT^OpJaF#pG7ExOT5L$S*SGT zZ7JZBg6GV!q@w7fRY7jY_sm`gVHQgOKb+NDVAH&emLb=x=4Pi*&Ekd4) zy%D+G$Fgb7gghy6oCC8uh~vHaEGnMqbHyR9^J>Vllj{V5)p)4vYQ}`uSzRWJMj9 z*s3+yk9084y_&2@vG;Aa+61#NghCS3*o(&!@@ETZOqUQkUt7JtoGPTAj4-i4-fk<` z{+=!+^(S2ASAMb)#ld#1Vx*F$y=h`lw_oZk{tg1pfEmvn%%fY$G5~3EI97lR9^2y+L`5G+9l#?#%Naqo9^WI+$K}vPg$uE6Q{vuIrL8hH1t8Ii& zK#;$lT~7W|Q#tQUqs&&@sGbl*u+Mm{EyTWdX0{^qNMk=oX@Munm+OhOzBl(91r|JU zU**IKoB>QZhHhaMo5=*>Q2gNM0JW}Q3eYi5uU4{>c-7e+|F8CwvhJO?^qODyCDkO} zxxNL~6`Mj!G*Dj^-?vb@f$$QbCRi=v824*LwX8I5Rq`BPSI?x%#AwQ%9co9+v*-lk 
z_5ksOHRa|Sq^S-@qvz&jL775`_iuD3EldRCXY8-9UMv^LD#fVEOVZ92bxRLdJh$t7 z`p`n1G-!diZ=n_+jd`8d{-%`s<26w>pVtbHBa{(Q?;%`|`|(9u?6l zzpXCQ8$y@=spPD6jwtx{#E!IBkHCAk=@o;!C(FW3rDgr?ps^miuWy%!&g4=_+wM*- zDKG~x!|?c|&i%B6i!m@sp)TVhyg-kAM^-hxS_fnz7(FPKmJT@*o9*-GhM=q9momor zx}_HyG9OI}`wsf=rvC!!EZS<{*4yX|5(y;?`HN?MGiDInG(PdXD{^OcM$o5k7a#7o ztK2C_THoTLB0e+mJY%Z#+!Wk9Vf9@_z4Yc3dAa8uT}`~k4#0}a!#8aD{>rP|T?^>a zJHF|2z${~q4uEl=+4yM#+c2%>kABhDuL12>$KaV+aZ{|&bPW4j)V0B%{|{wf9T(Ns z{SCt)Lr5c`qyo|)DI$#0AR*n2Al)f2qta5+NGnK7cZ`zKoq~vjlz@bY#Crzr^T?fh z^?84PaX!pG`>efU$J%Rs*Y}PhUPXOH5#?$tx2#a@wIuQ_P7nw61~H&`&pCa=NHbG< z%FBH(igL&fjTOLay+U?)_8&qcSCrCN$T2?wnXejPm9M#Jl7+;ctA~7PP6M|d*7?dE zrb$lSE^|sn^j&(Tm)tj)75RLt!cr4_$(zt)J?b4OVwNLm!8a*Q@WW$&eq6!?S~4Gh zVq2?|5d&v)><<5XR&F(bk87pivg$&eu{ctgjU;<+P=u$k74#$ zfYiFDXfJ~`4L&=Vr$RMfE_G!TbDTx-*i1b(f4thQ5+5CMSO*H=D3WoK?OUgQL(z0w zf4(#1TKvw3nVprbsp!R`g5H|ilMi3T)X2dxWe3ryJZ7jLM6{bwxH|6T8r7py`GWYJ!tTGc#Q)PL> zaf>4C!LMWdZftmYDC-{b5|IaDKihS$zH-@}De|~s!PsbtlRfh2i)kUboSn{$ zQy69UIgg=jFpdT8A`6f7E?0WIr#+J@MzOgg)M6RFxc1uRn zjH=c(^;4~d(v~04gLrw4_AY^{UyUU8YJhJ<3wqc~)?0hv+qrn(RH$D+ZqT&)_LdBM z590Arbo~H1s!0;hH+YaKt#?R8d}$n)2%MNhzsp4n=`+j(x9!w16pu$Dw3<}hD6?rI z2Y)Qwo_UYPS=m7xl$KTyQp&Oa&}MPTJEcMFrDI77Opz@xr95y~D-k)gBCNP+zKc^) zjIlGN1q62awaaL$RK-?ziWEisfa8pr=plPX*_H(-ryCLX@&irDOZJLq4pl*WiGh7F zUF`*v`->-)=JfH|I@5=TYB6-&v_rBP;0ASt-tXm?5`WZ3;Z%$leG({b(yuVva?5hi zXAgy#5+*RudTqD__gMtr|AMnnE5u|kPd1jIKL@0jngY06{6(RK#1*U6ZHYwo5ePYP z5HAPrjCF$7J=X77-#@IFwv01M(z!ow*#l~--YU`I`${l(U$Z}6^Q7EGW|Pl&=*>*M<3S&+ zDQ-eppW{^=V}r|G3!d6MX`Ay{doKBu2Y_YRtTu1sTPkb0{w!;IoGiL3D@3}Mr{_{t zK?wu*=+y{)jo?XF5f!b6m9LGmu+j6|TE8Snq>{@1$kl)S<=X8^X5#+DObX`UoIv@@ z$Kh$$`LFnGhXr%+vKRKZv_xkGWRb~e>==zdmSDmB#*;QYqfRPw>D%mdxXh8q8W2Bc zYPbI^iqVxkZ1oLTL}yPY29q%8hf`@5PQ!JH^sJ0IYn*|`GFJKhUhc6YhzH)2v?)*X zhp>Ir3p3qH&k>>xSjIMaC7&B^bYxtt91`JqNI#oM!^GgHNG` zM4v8gZ`b;gMY$P$hQp3YJ%t{jWuC-+Bsh56d61KJ<$`;AZZiJhhpX8EBd(Eg4kj|9 z_vNrE+Ob?hvhh{4nOS@sIjAC%0PytglvLrz=Cq)1qe`HeiwD{88<2_PbY$ERS&gO3R|p}cx~7}2fmY^H?>9s+)CbZkveS~T$CZsz 
z5;3>Z)d|TfrPN&uwo-|-6-yx*d6kowD7zMFxa@&MzJKub2u%F)Bst2x5B#Mg@xq%Y z*k2xnQC>-QY5==PnYYQ>TW#&~Ua$Wi@=~`g)t$EQK&8L84Qwc1qBC;O?BfW9)C0kS ztN_Q`VgWrj;)!3|rn+k>Um@M2;}$pCZ{+?c6wToD#&gs7ZO3q5MF0Sb_Gs^YT_pge zCdh92B46Kj+@ZT|kFMYf9pR9~HY<#UetbUWphz1WhiTSJY-*XBQzTBQI8xL&E^$i)w!EY z8@`YhzBxV2H6ZiU==k=0@oYTL@i0C22i1x&VVT0@9HQfIreq{3w?Kvn)8l$x62IF_ z8(Og*RCVfCR=egosXL-EW+=<3RaV%P^0mPwrrk#3R6D)$C3M0@$T1pcVp zH0#!R+uUz!(eAd`{qgcCLM^jG1kE#1`7rik3YLMXA z^Si3aA}voAV!p9$Z}W;TkG+v~H;5dWNJ)%7k~<4n)3n#S4=5-7wj!xVl6OM;T%J)X z`P(Nj>(2(Q1eb%k3=|eQWv-QpFSjhR&MG_;Gt@z!^`cq)IhU7C@PXeqzFL&+%3Y%b z=vc#St%uOvO({)?{@mNQw4JViU;ohPc~v)db!Dmi2N9h*qj;cr+4>!=2}!!fSPil| zlQwp@1KWlo8$7o>J?!BWO133UCf7G+M=cv_Rd!bolW%1x#_XE%7bJU(4!_*t4)hoF za!?!|UW*yAC=Uu}nkUs{pmr=JiiiP zFIY5BoUoA_bC>g8(~3%@U(!}rW6d|E$+zS~x`fG9ReHi{>KAte-aYhTFnBG&QyTo? z5-0Ug-6z(A`M{Tn_QMwb;=t_=cavcNX5dw@{S|!6MjoMk@tfP0&5sF~JiC$+4llX| zlccT5`T9&a$5Z`d!@%9%q%7kJJRXTdeXCjKPjrhtYX)DMSo)Umd^yrLcyweko%rnP zOaZ!gH1x2NIaO%)`Z`u#W+~PuJ%tHQ3SSjAnfFfNeA)dFO@b)y6Kf#;%l`)gBVa8q zu3op0Xn9zp8{1p@EWySdzlKI>wnmlgW0(bn_Wh=-QG5cqj3YY|iz-)qlVqQkSuLj} zlW=7_D0W+`2xCsZlT=8WW6dP^_M62hQvJf;gqidTMo+* z{$_SR$esY5qDcYc7qQCeqeh#<}7^qdYNn76B0e^ z!6K$|)+Q%TWKT?@Sq*I@kMF^`o*zv};UubuioR5QBuL?5qLM%Ow&fD1#R!pD3i0OA z+;`S^gug808PS&p>ci299}yoHq|-HTivHAZHCXap-8hYLGoPu12+K zaIU*y{Rdn@Z7)C|cVlZJCI2S&tFL={YsMp=k%w!kMJ56oVTQ2WF@x-(@UrNN#OSYy zu=V&zUaLM)PeEWi`P1zC1qapZ^j#<1+0cg<39wWq$loTL-%Jj}dqh_hdOUu33Dq^>;~9ACsN_`t#pJCl>hSi@Y$q-V|Z3>?l<6h`~ZG(;R zP3Jn}abG6VGj~|tF3Aisw)FwBsEYOXMlw1c?@1IKGSCga^}K9YGkpB)aI3+EqoQs3 zC(E>*=Jr9u3zwXe)sy08*&7v=1ge7{wK9owZ5dV{{xU&E_xF%+7nwXyleQwYbKC zN~ylk<)QYRwat4fhpyaGj#PmSM(k{{wQBtWt(tdz)^;n>8*QY{3m1+#>S20s zm}@u%rnDDB?61T9fZyNBZh%Lbw+<-jtH`ww%$RO@3y!IAVY8g01kIVDoaD<<(ty3s z=`nwE^=0@skt`h?;d#T9VG!HgUTj|PV^S00t+bR^h+Kmm%G}{Pr?(1H5=k{jM>r8>A~&zW8w-RDShp|AgiwotLDV%bko~--3WU zMgCc>wiLCPI2D^}TB^7epb|fVit107fH;=wfm#QjOaD#0&)6{3A4UJRGT(L6Z-&T& zQQPjVzDmU^gIK?2dB$j`j2PL^*e^Fr`IjrafU1L4qAA5Sa3>B5ZX5g_b?)yJR`*;r 
zVJvezADr@E!BSX~_G@T?Gw8F~88nkp#zL6Ba}A^wI>(MT1diu6R)#f#dN^(X7ja z7*jJCtW~Aqp!%S5_TerGH zD(7hRjJ&wt5tYvmr3@wD796b7n18740t6xW5$^|kt%r);TyrkbaQ?pB<)A_}8IG^9 zULaTSDuci_5A*f(y13cQ0ih*xVq>XZ(_k?5h0cs-Ztx`b73b9B%xUMZoi56Oa6SB` zmB6eWatqD+J*LQ+ugx&`LARr3m06Q&nz?RsvyuV+u&2kT|WZ4q2a#+s1cu+;%&03;2AP_8eO>2T}G9|*64V2|NHw9c- zpf9rBCO_)Ffska)bvBb5kaWCLLUCHQQCOW)o=m}ai;wt=yJL->`*)65Ino!OXt-|1i3b`(%1LIWtSxq%h)pz2 zPJ>W!d`>3Jso6OceWhkiaBDNIJNc`2Hhz{ac7?rdFJ|v{YGZZ#*s=`@M6YE?fQFJT z%W3*JUelV=30}IdDmOKVt1O=pqs|9=Z5ZrCK+qg;MB#Ovpk2@YCDsfQe?x5h#}B*R z_6(^Lv9kE;af;%vrNsJ(c{HGI;w`N1vV~BXqM&N>qk-|gKGJ<&$*T2X=S|92EZ%9K zTk2mBKgzbmi*r(c5ShIe#+*#dTVN@obQEH|serS*T`$asZO7oW+eQXq7e~y~@DSxb z9tPslgYbExg_sylFF^-ykPxv&NM76WzcNTi*Z}gFc-T5l?Mb93u zw6Ao3)IrYkndh`chQX5Qqf)os?EE^>P;OU!>=wWKo|*&vi<5KiGF7#WZlT^o>NY<1 zZT@4LHXqx|WTaUKN$xP4Ji)Kz%Vfyn?BgI5&>m`4Fmr4j4NCtxo+CrhM`(Es5jF~0 z(YjEA(YggJ~lwi@}+#fjXkc{QwMg_CV^$R`~9<=Vx|WGNk8x3E`poib`I z&>kdPyO}ZFUv5ZR|Kb$u%F175N1j>ST62#$KagmTt^826y$|Lq(jMcNG(Q)Jpy*4~ zPgbFMsd*4=i8!RlD%Qsc&a=lhmCuj?2^)GyWgDc&DSjqRX75*?g5W4nZW`fcOo z`g5q1lfibpB0&k}+2lzb2c&uo6AiYniep(<6vo93ag6FH2}n(o)>E~yfgI&FJDdo8 z^?pw?+M!{3fyIxe@%Yiy$;g0V)v!ISkR5$1iqA){s^}0<5%v6#D-iXvE)e&GkE+??BFn~ONR-%@#z-wpfA*_zQFPTbAsP!B+#G$QL zxnqpRw!PI*=47DHoLaBiTJSkn#WiJIv_q~{iEkU0GpZ{u>g3`dvY{E@kV4*G1r#~v z?amjY?%7ZKBv9ktwg>WNUM|s3M09J0S9-^?WOGtGG1CBY@S=RF84d63PfdEV(K3o&<+q?;CyaZjt?O0Me z1tsxs6m!=OREm#YQkHL=YJGh$hbdgW4M#5(H-aHR?hT$#G4U@@NO8A&U*g-Tfej~i z19^M|xj1odwmC*K67oekpIfm+M>VJCu>90bifRrg9g*M#w{CEZk$Xtp2=0Gp|3MXIM@dKv7$RKdKCk{ zP4h~P89BbW-dAK#c~Wj^b{Wz(tD9>s@2SHdEvaH11DkSj00;x{ttmVmkEJ+8O1k^e zu&&^VCE~qx?F#ON1oq-AuQB|SsZ>fjA>@+v9ha*JoR@YH;l}|Q!`AyOH}_Co&R)hS ztZp$W^T;$MmJkBqN2qPE0VoX2daDDFDp&08K31GL@TL95Xvk$^!luGkYaHA(Dk2wq z`l-Yd#IooPNkZRA>hMnZW1`_pCrG@m@kt2il~O2PFQS|14T%K;Hi5}16#v(XV!vF9 zpT)z>LD^MEDPGc6D(dBY#EThvUxc|n> zkO)c-gBQ@l^)?oBlEnp@NN&Aw*!y$_21TPVE-~2>GW0EPxt^IDWf>RT+WM#z66p!c z+%NW~k@8fV`}%BhyG{=-{)714}z9$LPF%k zKSdg(AAFYWy3XUBPHEVA9sUP!+wpP{t_G 
z+bX}T^r-@UakJ0Pd#!^BjO|dOpr&P^ESuK)xn|a1KYuKvYGcdp#tIW6OrSUNc3w}{SWrU~ zWg{N{gn!VN;N%pW`GRnKNi^xljs+Lk2D{~>d$NjvnZZ^RcHaRQPnwj)5ya+$DHrFD z9rRK9U@tEDqR$#_V5FuciGIh*JFodN8Q&3of&$r0F1g2kNi8wNL{CK?$Of(eNkVsk zWPd=+0(X9id0l_h*nHvDZSOg`{9?A{Yn9D) zdhUC4v-no*JXh#FOihzv=14~v=2Khtty4!Bp8%s#q9-50TQqzYMiDIqt89lH6_@9Q z`+JN8*n~Jh;9ZNolB0L2G>BUx9=6}@J&r5dBv(TXIPLSQAAAlJ?V_8NqdOM(6jOS0 zyH-hANb{n!{`Cl8YBSdkxx!@Wxsga$GZl33N2?DH_lug z_DWhD9|7z8y^+2GN&*ktHD~$`_hQVx=7VRtzpY=V?(KQb^9*qay`d;!Rewvt_Xy2Kn}dKk?w@ANfoig9#!b+R@z+`C3D+~V<>2hj%Md8 zpJ7iy$t%|}?+t!9HLj!?A8KNLiDxU|5B9GVz6;fYT${08jUBy70E-?3 zkJjQ3kT zIi^x@D8AO)R#zjwx!743rft={C2!C7T0f~R!P7Y)`mrg?xGQ18{7J=8Q1FkB~h|S6eNt<=AvM?!Oh`B*{;E-Mm3P#teJP z{WgcOXxlt>=HQrVe}x)L7|}MN2UEWjZcBmZUhcLX@}$mPqcVtufM7c*V5_jSxKCX0 zvtQ^=m9>_aQR9rRn4j|!12~Spc;m}K|~}Y3_)djlE#2W&ZxnV ze)Euxh)JFUl;qx0&z8d3Z2_bnmm)87bTR&Ond9F`;I55Dt>|`@Tcli4FzyyH%oqSh zzV0O-SDfNa16y&$j-lA#j?Sk^XN(tw7ZY*Z&)JHCnFEPY#NG}81aD8Skz_IwjLyY zw>fUU{g@E&Yx+s2Z_3fFilDi!5_?eRT_pFQwUG3?4Z1wT^d^cVP4IdHFMRAhpCN+9 z-1(K^V^`V|2f*8F?XmAH2*NScpF;=Hg6gKda6BnRAoaw`6lw$nZ{BL7p+~$e8zf!s z^}9mEJH82I(>5dt&+JU@{&kB!X%F6rMcy|fBZRSl32FctA$)q@y!rJ(U88KWN7Npy zpg?ifua;D$rLRV&H6?;F(uWbmAUKRKFxQ8nT*3*b{u)&BY?b;YV-zjCgmTJwOfG`Q zEcY(maqnD=6WVqyfsRnbHA3!?7(yhlAtMcdzV?FMCSu|V0t4>Pd_uBLFJylQ`epFn4mR}%VFjn)?3z@3FwQBmt zNq@3IR@*QN!IOrwNWeSQ=y1S7s}1oW8Q)9oxk%$o17mf?p6|7YU16inW(>uGvqmSc z4*lf8@PQ;_wp;cY`7M!I1*HfSr0!X zhQf#_`ap?YNG||-*zGKL^SS4=G*|LZqM&4`RWJ#FtwBtH+-q_PlvjNsB>oYOu-o=) zhp%2H&be{OPpMJi3WEyLbfJJ)=I70%7N-vRT z$b>3;tzzNzPqQ|p%$xj)ICf3!>xWA~c{fO@06fKBC1uTb30=7efJ7I~7t;PL!@**M zt(Is&on|ez9jO?2MqYyFG!}QVHxR@x*7+BCy1;aE)vdB>fDO0yBX2ah+bv;c+#vy!8o6!4ppr$-WUL_J5|r4&Lo|S-FFGzUsNl{N z7?VL5Tx6O%yS@^_R@t=t@&Y;u=tDvh*d_v;t`P%UbhzZCeq_UOtN z+-6Yt@m`_jvH%nIq0QHK6{WU@`%X*AZ)Z)5(61M!4eb)qf+w zCm&-{b{v)zb_!H%p%Dnc{^08NX|=haTaXhk(US1=jxT@5L@51-6t&6XKmev|!rH7^ zwZh_M%}iT-thXK@cMQsnd7Z7^%VT2Ty_+6T6>ZRmU`B`*H6HZ!6*w^Til!VPcQQJV 
z)Dn~Mo;&*B=v@Xqytt;6!%W6VYK4CI8N0VLwn(|_rz)q!w0tfksX`H6UsqWhIqg%DIv3|7g@_?QP8s_`WU-sgiwUUAJS6JpvFlyY&HxSS0qEht_k1>v*+F zDB`)PYz`Y-$CUE(o?I}SJaG^ua|A2Hx<@#Z3GropaH=;jE9=MTk)M+3(BNL%lE0J5 z7PD$XZBoQj*Mj8eD`eQu*vs`0Tt_ib-^(wWRr)_B=r@i@ltp=ARgBmUtLgx1uK=u} z0-~1O$C}EPTGqNv0cYja9$0!FA%z=w(4F90yMGyuB=!xN_3hM8ZqE=E8vx!vIWlz@ z@Jva840+qqT(SAwlFePQhp83qmt4I=wxvKbSLcDUodezz)>8HuU#5sXY`?t2wUO=3 z;XtxAp@!e)yT-D=?FuD7wFxpaVFfu=+HO1;j(c?*<7op6*kFmfbwqOjK$8@Ld`Y84 ze4+w}M}3x*1evyDjH?GhTT9rNJlBTV>Trmz3q~*Cz1fl|7Y{iZ7*wM2!*xK~R9}dH zYJagXbk<_11_I{-8GdB=#tgW{9x0IfaYBtA(2VEbGXi3}!_z;A)+-Qi($Lr+z2DcE z(lyZ;x!e8%DlbTG@HwnX-w$d1(4S$m4W)8i$d!DR(E9sYg9s4N}m} z*ni_`4gN`f)xKFzBJ7w2bY+y$A(hWyjFH>nE{sDRGECj&)Qd}9-D<@JcN)?fbC_n% zR7j?MnU(gvm}>8_YNv6IA+EZoOtS?!e5|ny4TN>J5TU_MGd)UZ;Fy4e!4N$9LZwN< z@XLOgFnZJdV6{ujcTLTRs>VI&`o}x{<8LFdeXURF-JIT0E|TM-VE{K}iL)aw(vi!h zr=`TEuJsUpfNTvqD2xOE;%UqpVC`xISj>np<^UODw55$;yHFDwIbwrGEJ`y^X3E|a zj4>-a;^%u8?FyC0PHkH*V~N#;dG@d@Q1phW%ZcO}m!?2XKLS{aexSvV;tbW7(e1Jn zAZr5SxJ%P!CKpXPMeyn*d6Po`RK2zeaYCMFVQJvWpVgb4X=W;%r!#^~&1>IFg@d8U z4CfU>c_6EURNZ3q#*azLahYv4$hk?C*_5y`M zeVK#zC&AS|ZdZ%&TVweyRqgyD>4n*kIB56&2ztkAu#(9#G@7|LN%HeEgZzS#)Q-g_{gT{jA zI!(Y-O1Q=M5)mf6+B}M%>9yimNOFWcLOnHnuT1t#xt8x>>)rM?1kRj-pLl#0umtKG zh$*av>*!9mbe%-AZ4+_O8tvB2J4RC(6*#_;^O83~8ocQ*uMR#opkt^k*-}cc^09t1 zmKaiTGa5)T2fSVwXutOFSit^WNbwkZ2OkEiGUzKfk%gXEqT`Q14zFC}pW zl)OB`@gIuxKkuY>R?ltUo3*6nV!}?&{Z9c&CEfmGQPJyi)uC64rxp!A zx;xV6HRbcmPjkDmOSoIm{l=&lrlo1`#BMa~7#-sZkn(g>L{Bdj(VU15RQD-9yr+S; z|A;Ei9TNz_FRQrCCIVSs`(#(UllbB0_qtx7B5Wm7XA*&Zb?sH7K^KWi##8ckux-4b z5FUoZj{AW`P=r2Kjp#kt0IP&ib=TZwR~>FNx%R_K#S68ShnG@}|DC%0F5&s_E;l~7 zW9l_8PIyQ|PZH4a%OD?m@Wuq`9o%hTt5#qE%8pDPOoBO+WoCFLMrH7N0aixJs%S)< zXrjdlGRtz}NnTC%v&oU{TW1>51}t{d$yXQt9dsUog`mS z_Zt(r3stLlh;KRA)x1dO*4J}4$T<3=%kL%??|Bu%I<7$&-hg-dXlDD95vyv&M$RCh zwzR=3lJQ5R^6g^y?cW$upv!kAKh*!YO86KH-ZmgS+SOFzmyY3-u8g;=z=(!5mZ2-ty!ofC;FnpMbI*qi=bd} z8>wyTyS+P*3Icdh@F4RjS-Tm}>lg*@w$OReFW6!oJ?pG6gw9Zt_=F$3Pb#aFRMFP^q5udoxqp|Op 
ziGyCG<#vQLeR@?$?YaH(o%YrkP2Tk2MX{6Otx)L3{3BcP!!p5vLoY>^KHht39bV;A zHFhR-EDa*?eJVWTjB+zIH9kGxzI}mshjEWj8#JRDb}WWIfj&M#(M%~0qYV9YsMlJL z+qd9RXfz=4A5Uf1~?;E7G&gN4}SR+ZyUE^ zo!qVUqF9{%TD-FHC$QrZ@D)+QO$uy2LFVFhqR@od7fNHITY@6IS13Kx?o8fXZf+*9 zpC+Q9Nw{?@Y5yVDF(J<<0e2>+Svh1L2?7}JmNP}ZsVF@ka*hKK#{B#KP;K-FC=kXDD^6}k3 zEo8D0lHx1lLxyY6Izdw})ex#L3m+;B;Ii??m+FAxT!FA?O;AxZOb9Fr-V?F@l$2C6 z83~<#JFQ@4DA57S2O0J3qThGyX0bq z;+aR!EiZyJNWy!?;QZMp(WH2V9B_rUMgDA#5y}e9d|uoK&upvRFYtT2UI;LN8W|&3 z#2)`&_dvUK1w${173(kOp}oCz^#B9BL%?dY>X81z29CgkUnI~^kWT&7H^UFHaq=DK zU!;nLh8TmUME5JewAo%W<0{wPdnRnVtpV?b1AdXoaY9vwkwe#YEI7gGu8sR1j4}X9 z4{CPqZF>UEa5LS#H~WQ-D*!v$^P3mN%O7npg9L8_CGK?GuK51MkTc@~D4Y)bMPWW* z`>LgiCOWxja)ihIW;g0XXX0 zCE8byD+=`<(icm8w^c272-P-yv3iGtNwJ2Npv5B}2sBu&vjMPlDHq4;bpVyt_6i5jl?oc+4= zY|)tB34?M`SyVui7UMC1ZKle`ldH3A^sOs?XnEsb=Xf$e&=oGsTkG->hdhL7Ng=#o zFM|8UzCbYGlW|-)1qc8uu$dP{ckMNDL$u z65dixe#LFrXer0$cS-+r>$B+$Y;!f1?;;}zU4xFwT2F+hzJ|%)%7P0(LTf&VPe~KQc zLUfWWo+-XHV@v?r2FHnw@kBr{0f%gk$4qtCHXAyCshI$sGflN!HU?o_woUQQx%kus z7rcngexmggTSy5T6UhVsRVx52;rrlQWr?vx@Sz;SdqrZ$@MiHW9412|CO~tqttsnC zYm2N1l=UvM^y+r z(H1|oCf%$8EU-=L-9b9llVUsoSI?t#*EICT8CL-i>8OvX1vW8XF&Cd;Ybm6JD{>>$ zy9WA~dRI0n9ljxJu$&3(^y|fVaQJKUD2}dj7#)!5lkNvLnfWBbNJ>|H4elg;D{c*x zAxA_?1`L%D{TefEGr9p0c!8 z+(G&nfe#+;m03e1KO^j5Bz%OpD#$)UKPy4EA5*G|10d`xA%4E+6}_)1xv{@}0EANG z!CgRaBP!CImZNz7!&E3PJchPR=UK@Gpuh`-ul+mW1D1wkZMDGn3DId8BAnq9rsV%2 z^Hc=0TaSESW7|TYbSa4L#6XvAWQffq_72USrRD=hpiWx9cYr7DjcfjZ(|zxC4;Il@ z)g$Y618dQ1q-=ATD$HYZ94X!*#@^ zDGY91GbBpqsivNBK#PdN!}7$YBrhEQZ&V?N0}X?YPJG_0wQAY?`%7n<9*Y>a-=e7l zkR?HX>+)Rh=N`c%mSU{Q)i)Bb7tR~uUu>hGn{}M=xaW)z0RZ;?cW=6&V2g;$@>7eF z7tgEFUk1etGGz|A?RFDW;?lqTdVcgs`Yf6IJ=BB%tdL6@=WX&Y^Nf^1e6HV0c)$Rb z1BwR!*J6M7cmgJlpcRbER{fWjK*F6{=D4XQwqC;1bL#i+kBCVyRRj(*r|t`}<$rRd zi*QrOUL?B)fG9u5;@><1C&nyDv?Q0 z4}dE|ZNDjb%Bw{n?dE?E5DlmwbS?F^9T#=?Qzz7(02O#fL~-l>T8VYHSlvIz|IhYp zpmk|~5=|X{Nvwa+g@z6Qku8O_^XZ#1;Qw#s0%m;~5$_LL2mB!1fB7^a{izt^A%r$4 
z-1*ORevUInKmtBMSG?^M)#HEaPbbl-7`5@K1I|y%|84-q6lY>2$56b2s>3hAf8B@W5Px@?S*!AC??XRy@(G za;3mY|M_v3OPLPho79pa864Bw-BWh`TYri^2n->lgU;q7Cik~mrbci_2JzgL0r-NfwgJ@jkRUA4cGKQX4fbowon7ys(h z7``-YT^;n~J7y~R{kgI&plHel5!*Zi?n~~35p3HyKduO6u$n+l^WCU zrrWIx%zvku8uP}twvr_3-JN^?^8R0=-pofVt*yQpsTY%&-BwvSbnPe(L8Zs}j2`Dj zSKQyzgJ6ZT8IP$qQ2TDgj;MJoZSqs5{fDz>pWh+w=(yZ&wdXX%@rQ0);c1|vu`Vy? z7=#k8&hEDy;}QD5j|;pAX(4W1INuKb5L6(3zv_=~P2w@3T4IK(Krkqau3aBRyR<{?87z~>tVV3M zGLl^pt$uv&XQb$$m8W{l>aDVijVj7tO>cl-sqm`39=WIUakF#?swaV>BhSvH6`mwR z{Rj47gPKiq-e5A{bKEC;vuN&j^O0&B3Kys`Pqq1vYAB>^^dNEFrmKu%{1>TTCzL0(NM6Z#ZSmV-A+SO6 zk8>mBl?V-Pd={@hZYHLUM-_?SnQ;kXIVPj%)j91js)j}#u58wjvX@ST+K!W>sD1x^ zy147TLL>K;`hGhmI0q)@1L@Zdm5kpeGqTt1IwY+NKPaNess(&%=_SW04ChVY@1LW% z&V5Za>Auu<7sh|W=p>`>uMPzvZ33Ml>m(xP!F9hZr4@wW@TWKtB>Q8%+# z)0g2W41hq)=EF?+F?w#@{cS&V4ys0q{Vu~Cf`P7w5r`c9d-c7nUB&PhGARD?LLJ=<-bnDlpw)~=mV zBs;?Xdx|cBO7<;tRXLK$1+=ZS#rVqzdqz-NBM47t!o^5OQdCi%qrIAQQ`M?`t!r%Q z_Igh#TixLJ!>$`#f7khOrBe&5NGl9J_a+LrcQK3RC0CnNKYIEG?pEz(H2=`EyM|&% z0l23+9T<^K_xCTH?+c*`Cy*U5@H-G*zR$HYIc;4f&Cyl-_rV!yo+|Zl(YwXp+bSu! zM^BKzQC9VK`Cd}d#=sumb#hc|%{`OapI03Z=1rMEyUk6zHfP_e7H2nGmA|X5vfe3w z)UU{S|DDWpiU1G86bPymjVRvA7LC|@*8ly-_OoNTa}AITaUnW2F*l+i=HK$iNDST> zJnEjZQT4Kqq`}$p`_Cnwqu9Tz5RhkcszX*!v(^7j>N$ZRd_n7T1%{Vr?4>uD4&(;D z_`CBSpjZcRygOYmY@}Zs|GpzdP}t)Fv5MNE@Z+0ZN>r_e$4f9@RM{utoNhOgKvpkM zi2Wt};xOnIWHn{4a$;bc&K+u%>XX{bi^7S1{j=?%zRJn(nf(>SAD#cQcBL_8=~y~3 zCU09L9d)5H!Z=f*3kg!c2sp={QzBi+uaoKaSZ4oxe7uwPRwZNcLOu@&g`iFLQx-k? z+KEc)x!!<%9S(eCjYeZ)rbcke%=YNkjb7BnkpFDu;P-KnMA3_1Q=E{3k*$vF;<~=T zts%1!eSNnKB^0^L)bSzs_N(DuUQFlY{CCv`5>7Y@S28g$_l}!Pg;Dr> zs>Tn=e>)g-5GE%6y*?_>aOm4N`tm645YRLAMP$VMeg-52b}P(fD{(Ku7ktwINZh{@ z%73=``lG(lZA#DPpcNXIVIHQTen*ioh%tm$F(utr3T0@&)EC%&tc~X#NMrLI8K&E! zG@E5UTcMO}y%Yb=xL*oS2Q?~1+~?70Uc|n4Y^N{%tLxJ@3W!f>C?s}3S+mZM6(I?) 
zpU>+Igya*Evk*y9U0htbJx2+OqcN z-Z;-i}rM7WwzpWEPEW3LCpTO zB)>`VMme#bC&Q=^G0vWERfkh>BsYz^0WUZ^EEWAxm{2XFF{8p7DeR|W99Sqe4F7s2 zim%TPP>xUMxRT7m&DxvS>kG0tj^O4;VdUnS^Wovub1$5yAIU}q%)QXDq%V!op$P3M zly->gXJ(lbJp6}ER|1!V*l^xEMbLH=R1kF;7&$wJjF|0+qp0dRLN=ojiuAf=ABgss z%eHQy45B#ybmq3wG=Dfy1Eg-og3YvQE5m!n;O!7?r5Tj`5Z~`OCWCv zTLmH#%dKFm=O_l8aP@QtlEnK)_YYQ4ifMwb`MMU@%ujO#RU;eD_|Wjds*vo}8e7O0KrJJ4nbb{h|-T(UzV9)?WYjOUVFpk+HZYxw-c%ND3_f0;`b7cP# zc1Y8KjID=5oJ9(r>v(l$p6pohK^ff7CdAG(8-;pjR|Q19BCmNI~U9*qI|RtiK#}|9`F}xAJUMz^<}_(QI7oKNen+ z(_=y^lfA(oNYm&z5e?B=@JkAA}Jz`ot^)=$4P#1Jt=p1N-zK zr-@x1|NHmTW1vy5z9!nn91!#UVJiEzzWjG?F-k`yVt=YoCD2P#LD9tDf9~|{PMiXT zbsAUi345sN|Lx5i;BYzIK3V}&^M6?2pF0BT3yU?@9ffi170Lcy9RH@V6QC*PKxVvz zhO2*aF`_6SvLimSgviC|jgaiRQGtK!__qYZ6G1;QgQJ;}YDnEt&Fi->oqhpuS+Pvi z-I=%;XAS!P`7Va>$&FBJV(LI3g4FPUz0xUX-jZ?r4>k01Zo6T%Sg zJ~8Xxq^j03W;h5W*IlDNS8`~nE>`kn2cm(LU z>5va1B6M78o(|@8Tv|#dR_3NIbX=;gCN8J{k+n6p0DjPXXy#%`$1B9cO~)l;{?NkG zg^r)+1|65Uy^Xz-s)Mm9&_=@C?V+i;w3D$X4wtN*i@B47y^XPpIh~7>EAZ>xhc3>_ z=1$`Fwhs1o=5{W0LUdgB%@Bc^ETZYBaW?iF5V*!FYNpOciu;2uDcXxsZcbDKngS)%CYjBs~u0eu3 zoNDHsZ=N~xlkUEDZCQJ5xr-_@on)uoJn|U`+^sLF>C`)0@o?(RoY=6EEb~Ne;RFLGEpg13NIVcJKfBga6Vst%cu2@mG(KJ6?eh0~!oc)h)|NT=s z{?1?>tBQ}UOJU{eM9T4n|1IS8XR_p#ZOb}qO2ckT?k9@kW;*}A#J`0A7iXmR9s1a? zS5h@=V_&f1fR+D0a$E=@N$&_6_PFSB@^p83%fGS^|Nm8v>vRAucC2{m`dqavA6)Ce zQ2&or?}ai?bX!Kf6D$tC{AI!V9|=C_iGlnVy3%hu&JJ~l)AAsczWu4aUS#M^J zM;d|;vkA+hn9&qj|D!IhtlrU|w>%WaImC>8{NV(QNIolE2;znNN z^*_3F%0bn%u6nIHm*Kq;^r!tFTjDy4K_<7yMlU5=8d&*Wj$9^swy30~*w(cXpZ=C@R0` zpQgzL`c(Z|+RjBTjw#@jhmz1EgY@iqC$4Mw5AD`f;YaTO49+K5-NzqKaL0EVBJ2~K z7&fS2DE>#oP8Lw_cijJDWaoIA$V*gK{K)#1Ot1-FltJ$JDLsy~&zM>!{;&uF5 zEu<9rUkVz5dS#Q}tr=})ef^Y7BMM~CTgbB}LBdn>|I+Vo31D&3S;AO5^ZmT7cbx<4 zX&yqb?0XGPE3AtDG9=EY%2QV~MW{v^tq80VKI#PfGOXNe-ajtumD)+VYNPvc^m2=! 
z=z!LD2OjXToo2pY{C{$okqdHZn_!k$=1Mi*^dsAB0jL~wiF`98=>A^`7zpY`Nfgx4 zulWM}4)M>I1X|y6P%j*+V|-E9|3eZOd~-y96LsZybv;M%x0S)l5CFlS{q$eZ23{WZ z=-}EeH#R?2VfzOw*i8W{0wH*u$ed1UQU zs4)A-uQGWdHTN9tL_XfLT0hyz|B%Rp0j_BEMcI?)KgqBThroS}xI9kKaMwjY8yDZ{ z1{mqw#-rHx4=WR+JN-L4pISE$=Np-SrkN{1y$2*+`FvIVKj!NU1tA@KkA$8pUtqv< zKO!#QaFsuI*k;HTCHG8&@jLCTcJVL#0?ER-?QJh|d6~-Efq-ype0w){Vf!*0X3@Q< zC@!%sz`yqG+NO}0Vd(#ut5c4suF;sCVbLf)i14RM?8vEY#Z<%Zh42u}aCsgHN;{h{Qcz!g_a z8jQFE2!~u;=begLX;q=3o2B_B$fq|>qZcS8t%sdY zykL3cJ@*<&K?)rL5;-#t!l^b+MANn{5LI42l;VTri+UK(GKc(2jFhv@Zyg7M+#&`5!~2pD z+!|84ju=AFkkMLp=QiujpDu?fIJV1)lm#PG?SB+ z2CE}KeQ04FyP{Gckx2mgm;0_Mf%dXoVl$2TvFx@<8&y!-jabDUOA$sQTDMkGEjbi( zHXeX87J}Yhmi?-nFxp`;45Vp|iG{{f*$y^WId42a4%EJ@L33d;CH&C$+^MXFH=cd* z20CW%u7y-TG8bY_A{yDbw^bj`MgxFneU&pICyjMj0tuF5d8lda&!d_kkqHfX$$eKv z$scx3ypir}gT+^mEKnHsu_t}Mm>hAQ+bu%MszOzzPC!9aB9j90h}%I1ueeCSla*-Q za;{X`bJ(i#^iTnTg#sdjaz<@J>7?ss3^^Yag;4B6Zvjmlg+V1{vj#ey2N}18@3XsN z9gX-Cm2Wbr@?wG`zA*pI)vS+CjXE>?)#(n~Z+FKc5I%b6cx;eCzP*(`H`y>GN^V8t zrzym;?)3dbvi|42v2|L*G?@Ii5?9%vIDTQM->&>Ll(-C zPXs}DrOOczGrO_2?W=K5@G+Az%_>Dh0L~$=%-n>@<9a|>49o);4B)vdfK_ktim3#& z>%dk$N_!4k$wM6~BGeKQeU_C%e5ZJi*-0!;Qmxq#9WcfE2eor3TSVvH@1bd18aeS$ z5P`}!6l_tGtKz={_4DhYq;(jLT!us@CuEoFIdjJc+NPWsqwk=UO8UDb&nP0#Zv4_` z8l!NSIOSvfl0qnHN1$Q@7Hw{Y(1gmV(!-b9sx}tRcvoZz69WdxC3Nm&8#_s9BTTy) zZZ;-H6A`$db)MpWq0_E@3wab$&i&Es-8rq(JISXrc~_nyIC8mI2q)lWj`5jm3U&y{|!M3$Tfd?scu`GCT+kgx8aG=nDxs)YfXL`S=(u zSl(zp3%2c*a$w zM?_RQ#<~@C;s?9N@`R%6on*!bxj@2c1N9H`kVese0|feYAtia45PwELk0p@_2f4wa z@Aj1dUl2Y>Z?c>J)YyeZEu8^Z1j0R=Hrh-gRIZWj#^ldlP1}KnKGo!>EyRg-KpymK z;!RHTPvR#F0tjVIxsM!PFsYM=YD0h@kNx!mn{jC^u@5T&?aB5wBL5l@ty3b0h>4UN zCU;ou*=#^vrF5K+bJh9?@}vMEx}kwR_g1D2)0~hP;Db4< zL$2~~H;zf@Uqt{}+>er4v^xU(GUCvlb6p#9uI7A^KT$Q@|h*--rY#EeVCUmg-TV+$w;i-xLu!Xj1~w1W;Yw%a6qK;GHhIr$P7jn9& z8vz?$unW+Hx!}(P`a;J!g_r|Bm08b0Ryj*npeN2meC1KfLlRm40ff&fS3ddAj#{qd z`x4}G@%#W={O+(LYLB&ML|9IXQLAVZ-10r2@gRaMWfKxOE(uzEI&dzjxIvM{Lle`^ z1(2cnN`vMzDhX~MDS)IRP1dmOBhPvp$Bzs}z86wNtS9h+Tnc_f)MNfRROtT+BNMO^ 
zFEX8v5sGn%D2fe7L!1Z%`V$s1pS15|XF|xrMSt)?siSTpHEZKe7VD}XB-V>S5@WI7 zy+SM`6H(Qu$g6ZLCS5YQSwDyYNq_-gmp7ZB#EY!~`>_Za_Pra)z?r)5D=~49b?YH-6 zrwtz=aI0Iq{1s=g0L?Cmh}s*AzH>&%^RIiM^Xc_sLQLiXpU)7E&sruSZ>ANMBCQdQS~NT>5!1*ae&@?0(e@*R6g%LOK75x^IN{$5=Tk~cS*yp!WTRO2sWRV#t3Kf(ezif^wk3o3PyINakBJb9M|t#}&!EL}KcRYe3K%^5<9IY`IpY!j&1tA@P}o{i$Y_1~)%` z?$wgj`Lgx}7P@c_P{4s4j0*%|ZDVuBRG?=6u)rt$UJi8HnQwikF1Gdgva9pB;g_+2 zWTM4HD%b^>%p6*xv~f&u9^nhR27&)+V}9NHD`2kRM|g!l6-`L71s715{oj z6Oa>54EA@tjNzrZ5*t9#?-yAtvCV;H20tql)>!PXM|$Y9cQd|Xf5$$DZR{FO^vrna zL#q6+B*A8=8G$nR{$BsO^ZLlZ}sApU7;D%IX^7ANu zk}yE_zu2y@5|rPV=K{sJ&^DcWd#VnQ)XH&pnz-4#uGg$b4E8v4Rg4`X775c&n-*WL+FdqM?2-`90-+*&v{wEKC^|N~7VUozrR^Ulf1ru$>pvQ>iSLL~tMle71*o^`lvQhW``L z5%4r}yi{-vI?1Y1`BMyATHHU-;+7By!TG4hlG*;#uU%4=8b%IO;J>56(}H8Ub}_YC z_{Tf6cbnTc>X8|q1z2E+OKIJCqPmTgKHn0 zhIddgR5Wriktw%X*s_-AhxG8{XoL`uh`^A^5{B+I7YhJV^vG8RZ=4ML)^u{dNpwoaKfjE_R&0EM)y3ekzeHt^`r!fs)?Lj?KRzP)2 z^aZPm!!??z!%7C!y?7h8wXyt)CJNs#1>oW*N#vdWmrhDseWmTJdPFl?tZ9_1T^(1R zOuH7`mPSS=&gg}FH$A6{dODhdJgum>hG!M*itj?0ozs#n)CESV*HCRt=- zItT!DV+eGn$mE*w+sN5w>}vO@(Z=~uZi@`~z*tzbUAGdVqG+3XJ6F&GK~y7x>FF34b>h*u{3 zYiu&%k2aUN=Qm6+V*c&xG8*E_#`^4YT3<&}bmtS=2m6v;R)0Iw&=xQi*NwJQ=s4TMynIKq|)3e;}KS0Hd%aCwE%S$ zbz9ZN4ea+L%;AvU?v?5wMj@k2!Gd^HCRgpt?bxF=$F zSQtzD6;qLmW5pe1mJD|m!0N!40v!CcZWj69tJHiwt>p0ob*?Wd!t%lBuFdry0aP)R zAz#E7_FUO&|mGd_-RyZ6{K!{nJm3-)$&t)kr;D?1 zsnX8jz*X`Tq16`q>^5of%mcqYj?rnbKS`Qr>;t;U&x5z55GwMfCfAiBBqD6N zeerPm7`?fl5;YR(8GWmgvj_K)V9txH)ZpuzOtA~C9s{^(;*~%e%*_kLc~ptl!3uhC z#g2bc97Uj77> zrOW`xYAL|vhMcTHtN0QKRInj-$^{u)4o!J_D1@fREUQ}Sva#0A7crY4Q8g4HmKeO9-`VYGqS zcLWPw_M_9F#^dr8zoIs;PX=g`pf+vIh6i~9Co>v)V(aU zk6fF+r8qXB?@I$7^TD3s@{e)Zp^TF?ExMGPI9S|$tzv|K<_imLvY#CR!UIB@3MSeD zLe_+Q$f4Jy7^_X&OkSQ&rB15mnVFT)6M%XMR5`WpCd-MjY^&`^r^1({jDrw?XmRiN zFw`-?2?%ML2hUG&{BTg4vEiJ-)2QSigNC&ATcsuS#|oMhbn9jnu%mp}(G*z323-`T zle2~V!*GVYip%7foPlz+;`wAsrk1~-8RGhXqEKF^PTLv5w0i(4KXpkBUhaiIw*>(LjZ>VXe}nn=*77Xdd#en9>D1iBt*PJg|Ym| zq~xegrvcwhmt-)04!)aBDXBGgNJAe$U)KrpS^1<|dK;PK^F@8rJQL&ss=N=sm71Y( 
zH&3GF?B@Rt@U2?=={R&7w0orMSSPQYqKav`Y*N?2^V<4_ z1lEe3(AawchG~EPFd~{;*;DSQ^y=&gpIf>dYjoDAaQ^LnjQ8o^xcc($#wG?b4`=`n z+JU-F#TNESbnkvw)Af{4!)fisLjdcPB_vMP4Vpd?>500xulwHnS7Hp$@xo_~2?mq+ zZupYAQw|o>v@l?@5duIHzA(l;GJ0}O6n%ws(f6QBWr1{nB5*EE4ISTvr(~MG17>Kz zJFin0g6KP8b1H;ahNW((*2=8jfFk`BRJmOvjaCpK|lRfOUI zn>L^sk+HeI!Y-up3jigsrUV96P1dW~qRD^t7O~a^qqILXa1#?5fOg&I3z6F>mNY8q zxn3+j$_u0jq7g4{YtSA#0B->ut4x6|V$UvQ-&rQW51bg_DxI1$CcsJv9_>Bs=ZA}V<=ur%C?ZbO>s!tS=m}9W=_8mzr#wdn zvwRo@WSc}L`ztN8AC@U?`MwI;Tak&y`B1}1!~~w60Nb3p;v)$}j-n~@?)v7Y2Xa}i zbHJ-8-x)*Up${3)l*+!PV5NNFUUx@PIdI)BDiYuh48z8>i}s|3O{qp6=uUv5{wZwm zLqr)}$s(VmsF?8aU57A%NX?IBf826uj^HC?(tH3#TDnTS{*O z5hq2fc;GzApg`>GvvjNhfAAyWkZls@F$BW9v*l?OaKZ32FEdk{Maya?k)N?8=^g9=MzWRpTx&*MqsEg89@HKM_r)p&?d&2|`O^7qUQbeWK!@FV@miD` zCodN0(zC!RcDY3D**D8|vT0m;zDEkQ9^2R^?G9_9+ju*wQ@I=^rB~ok>3B`G9I0>i zUleM;>cR*Rnag{lm#>LN$Rnh$O!nd=VbdNYDp>}&?zNAz3lsMyGu{E@pvSf(Q~MGE zy~fyuiq_6H$4ogGfO^0u+zuzREHX7BCTQS5Y3XqRYvcnAW(&kwF@zB@fbR*hvD+=p z`ol+~QEMP~c#Mabt^Li1&^yZ;O1K(_p2EJ^)T?GL0w*_mRA6auS@3Bz{r7-9P`pif zWg1(C$UyhDd{H29JrAg?VY*Ku-fW*R-tGR`Hw~fCe~D^`p$E6##U z>^PZTm6%-<@~kU4Bb;w1D>gy4G?T|{GRG9RQg2LH#B@D+Q%(EP)em5O3Q^SZ$GP#Z z@nM2D@*Q}e59k*G&H+(;<=D)~#n+V7GUl@4jrS2I=sZ+Lc%<;{BnL#|5EG9a9J>t|Y zn^KXRFjy{5T&M`F`1v=9&InyFfzh%aCuENfD=?l3JTf)}ep3Zjm~x6ql&AJuw1`GT1c! 
zbT#WA@k(sUF9&is^_Ab2zm9n*&4LeVJnXZMYws+rv16b5+6{lyW`VHDzO}S(La))H zw2vp6RrcB=j>CV-H;YjwF2U$5XQ5pB##$pfXqzYfn#@SA@=m_OUKJDnOJ3gbh^c)8 zBZ3^b?}QLo_wwu$ZTH%O-AFR?qJUW=9;Gxidgn=e@Ex6s+-D~3;T=Q$iPz-M!?~+h z0$v0T@bPC=vY%-%8rK5QNolPL#JAB>l6QTZ&M8W`$R{%Rk7y7hyl1;`$__GA7!9Zj zW^g^*h@9sY>k6WP6v`I1%zqkHx> zjCN7Y3INIyI*$(}kdFRXc2lsh4vy!_ExcTN00BP^yHrHLMCU&o^Xx&^_U}~7G6#-3 zzz~uJOT_qbXd(YY`Cp7tmv-j?_>r4Ybw%GX9~6YzsI491tDPzS8&vRBm*ZTq9KWeX zCi+lp1iu9z2u&)xmgT~+-|U<6df!8EB{T<4)(QRv^Tw4x1!xPv^gQ$=3uyH=8s9Zi z)TUm)9=LY|sWM3KY2n!9CihULqPS?0N8|+g!T_Vk2pBZ>R~;Ydf|4TuOpd?qAgV;h z2VT0=!IZ{O4^UdPO_f>@3WJoil_S)1UHoB2Q+}y{y#vF$SwHVMlec+naSBG^k;;qU z1qD!!NL6w_CH3r~IKBN2d;|n2{I&Ua70rpactm<_kNWB{yv~>5jlR7OsN7I@7u&U; zdAe`#?aSZIa89q+rBZsX`u)H{mXPh?{Q=<=e|%?HVqItD#CQLC`$!_dKSnvOc5>uj zpc(8FSp=LxyGwPDRPi2L zU)pm(nb3AVT6gF;CrFlwRoXRe4Kpts_Abvy5b9$(H$Ix>9@1Ys^(|CZ+dI_YmD(BI z#zN=R5mJ5Gh+IaZWGk?Hd2&CZ@3XwyLp-lK;0j!4xsYN*+@ix>8aZ7v+n}l2V=Q64 z{jN~B>}_3M3hEHqW_WD6b|NtnlU06%d59gaeME@1ZRE_mlkWd4=w||nuFV_4Uorw@m~Pd}1goPoL`KiF`w{%$f;i zT9B1rroMlI+3{@ zpYP?H?Ul5hL1~X#c^{z%;%ztthHFaQIHp7E=Y{LU<8bl4a73Dft@`+gT032?8=u2j zF44?NA?HZEVq*8y&#(!tWz1dXIx5a5nFbYWpKGQb@a6YLcHt>G(M?^y3803euXr_v z^nSLidf0TvQz&)Tvlgo9Ls$oxzJpn1$`wb9$*DwUUDme4LUWmv7j{zNx8{{fOcEb^TLQ8S!wW zqf+51%qF%PX%rcFYN9zqQBZYA1oFsvc9UU2?xQLv;?P^ZFZsU}r<{Ko1`gtPJR21l zBAZbq@=qd$sUrK^2z)y!f*B$gDmePGrO+x2n(k-@FtWZY(P*l$oNLZ|bh(e7Di$$T zW4fJJF19JhRLIZbIBh#SEHy@HXT~OzpvOoWZK%rf?EeM(9PmA`b`fKfk4mxBnlCBt z7(OfrADKd~g0LdturfuCJbj299-T|1oZR-0I!5@Uv0N&=V{`*d(KWZrH{GXXqZq#P z9hPP*N=5EpNEO;Wwwj~)UOVm1cJb$BS7d+UPiYQBhYCN%Gu^zkk&Mx9_Vf9uVXwVb zmLNsLc+e3FCzUNYLtnDtyZ-b;V2OfaSLN>So@prL^Ne7qt<%`Y#|`p)C~J^LTfb+h z;fiLpT~qv!MdtZNow9hdP$jbkU$ozpjsK@LO{C@~bEb)6sH}|8da+Xf@itVojF~TT zSE!{US~-#G^~KB1oEHf;)0Zu+bHtRSE!AiR)*JbY@50>1naO`;TZ*y_Y`UQaYU&itL}^g-FsuK->Uecv~P#mixE09Gj8uL!V;~w%2;jn!mDh- z&=>49fRVYf06*ic-!wb$C`;xmEzbZ+we)ao?{h~!8wb=6) zoes@3s_)~{$wFHUy=q7oEX;@jMx4UxBP{y9hu zAzIp9`sXW4nd$};&J#Jh443fiI87HYcF>nY=F~{A{L<9j$zhork%v5Nm`F5i 
zAakJ&^xdDs$D6Dp6cnl)kst~X`dX!U-r|V=V9}S~WAG_^ztH)x_VKAc;bV;Gdxbxm zq8AspF(e8F4J51shY81{J;IW)Z%Zeajfm8ztXSDD9(rnWSaC9|e5=+?$?9ZsEc{DSao0WU3-^lvZB;3c z`>rW&quiy@a3Vaf2}_boIwADV?Wf&4`pQG)rdVsuwM$K%b}@R7W# zs3Fekj7W;opXbqH)`zd3g@1n0TE=4vG@we7=`aaCO*QSsfjX(oOZ8*8c=sNy#`^<~ z;8VB3+t3&hoJxEl{!CHURhs-Adi74C+JGZz#|PiLCI^~?hcbnGc5gv<_YXZ|f(5Ai zCnbx)YH>L~mQp*;bJdzat4{O{ipda$X@N1z;mAE^zT)FUPq{KM<|QAGGraa&OJlKP zBq>NX-_U=ZPyfVOEZy?O{@AB*8Qr^mg+esAlfc9}k_ZP4lK`9ijDs*#rkG6|u>UBti0 z!?tZ)yE(nFghDYOr?vbzf!^|)Qn5{Km$PCyEI^Um?J%FlR|iMCvOQ);omKkxf*b>A z__)3kmc4p%{g_GE3F>Tpz{4h~VUfNn({JT#JfaPvhWZzN6=4YSCOmd-kMHl&8=!NP zg~^}ZO1@->AI8X#z{%4D2}{-vwCj6viIboYsyDF9Q+QS{&OT+GKfj|LhgIM`Poc&n zC%eG(upn-?2^)v(MOILS)U+2lUOn`9l;`e(NW7gGmiG9`gfXAR+yn2x(C?l!Q8^Rq z8!w3~`->TPVx^Afz~*=RGg0Gq_M~)?wTeoYHef!x8JFfvT2b3DP;8DfJ?gW^#l32M|?qPIHr#FUU7jzPtR{%ks_ z?=g*g2^dDu%fiE+8to@^931w0$GNF)|AhKt-}XB)a9kKRJ@Pa1g4(fs*A`jjf$F{WV zQrIetwu;yynvAWX24T4vyo?~$Szo9BncLf*Z=h8Bk}l-VI>242*GK{H;>#=UTe(-JC9OD_E4Yn97329r}5G{LrN(n|hQ*LMoICXkB z!S+sO=2$>;D3`4>YHv8A+EM<*-U=!)CCietMON|~ra-~@CfwapVp|Z<^2;P)`oaD- z{vn?~ep=)WE#YEw%XHn!{Q{IYMe9fS6=xO2s>?=C7cM`jKS;@qKj)Y!;vg{{?Deu>(rv|jA|puUYLix1e>#tTM%6Le*@V1v z61C%yHD5?0NsZ>CxoDf^y-R=dU_iVxzi~o*&OF)O8$`c0ZQwb@n3U(Me!wA;&$4dS z=GgkfGM%zt+F;lhp?sPBY6zfX75EA~jW-lzMT@vG_<=bj3_A5Ip#%#z&gc}0MB5bI zRQtD|;Dzs;)sv5j+6BjsQe<9U97^J3r{MDs83mdIzSI*ExsQLwQJ`d!2Cw#zfNbG>x1+Vq z(dxbovR{z>xUFK3?f97*tXY^qdqGS)hw%@q;{nN-Bx8D^7!!HqKm4?f=h!(LS=zo4 zEjC*$swV`>qiw=$*E+S>tIdmu=GA{M{A=&SIy~d#hCK*F3QN7mG#dU7b0Ol6z;>p$ zq2_3sMJ_~%?L>)2GhcA}QI46{z0eO)u@UB`?`sEvFM>If!@HXnT+&>CJVubT+B=*@ zv$_W(1Ee^-G1q*>?d{iq zEQ5P7^M;t@EIJynT2AA-kH-W9gC8}?7?Szz5Vm?#z?p3KjyB;Z`i<5{ z`J~fhH=?!JM!vr{NdohAXq2B8y5;O9nSfvtQ6?)IL48YOPAhfLMYwTmbkC_m>xKR< zZ9U$2F)+A)mzqacT=n<-!T)+*xAkUYa{;IRv6SKs^@qDr%+RS*WSm$SWsLF04O!>Z zL;tWq%J+37qEmAL>;Aj#wH8Ll5u}Xb^!G65C)1zlYL-Jyx6aw6+Y-w@XOjNmA1t9s zI;Dp{d)zA<$#RJa8nn>qhUylV%*DhE`T4C)sepFn=4T2~9<0CpDvx>IkGNT^6K-Sq zus&A4zZEBUVK=r@ms{ 
zUk~U$<7E^$+A{}r{9M`c4Enz@c$(YHp*gD~X%Gu`e*m4)mw_9yqc9l45EUDwmiK}1 zq{BoEn6}lOjBcUIl32({R${(Y2Q_9j`MPG_(f$We9uy1_5|A`wh=dLPs9vI$aS=HP z5|+%7Yc!eL1JMwySyV4X^Am=@`f7x!>nnepjtG~uGEWL_)*)pjLwIZK9Agfi9Sntn zGU;!wD`n@EMt+_SU5l3!4dF86^V?qKEs4{Y=<2)!DyBO6xcAtQ3YKkByK8eV-3rCPq z>#)|cn&q|U*4;Aa7JnICY)50t#-ObB`K~QRbvY@$Zbr(8d=>rOe&CwGN5)jbmou`MHdhP2a zS-SJN68H$@s~5k0ojx_3u-zxgZ29P7w2FVE>%`c41m)aNZ8g-KG7mh)LXi|JMVNCc z)Afm$&oj4p%rE&=%tHQ+oLR;$yjqS}~ft^SDxqE4Nw7Zff&s?!dU7ar;8E*PP zs8j=LQdc7sbIEsLZ8HA;i9O}*{k!kN2Xzr|uuWp+;quTMA11rK)DZooZv8nXgB|JJ zcLwNlLWE}KdA+@1dH#hk|vnVzl`=sXpOB{u4!x&yoe!a2f0>eZcSXta=##yG+F#@)F=Ct=spBi zWOt0bj3H%ZHdCPd`p~vpXZrpr@G?5~jB1>^Yvv)r`#R=G9Js3g#D>1 zwMiS`^1P)oUBhO3EIGn&EC?hwS_rmnFS7S<0n1;w^WGI4w~s>iV)DmE6flZyn31dA8#H#M(b75I{}tv;xN8J zABDpjl`2OsQaiKNpPoek9Xt`uH5lt8A&XD8XM^OtL|Mh1$z$adQf%>Qwj*+znLN}m zdvE5M;~Y>7nSO(4rUe_Rg6;}M=AMcB+pVBtWUh$xp57(3+qmgb-&c*|26#4(xI2g! zUPvG`MQAIbj(d-o!^|CsCaRfChwIkU&S#k!Nd|0JU+hMZ^*8mqotPZ66|a2||GTNregp81Sl zn!+Rfr8Kuc;dsbV4Bt4^vFWJwuG*+<&uX>Ytcv4LuC5b6CR-^NGo~8qgAb0N_|}fw zMN!6%9$+0~&O=o&`dLvsiYvy%cd{p&sC_Lx8U{5TYHNf+Ur1*zpI_=CFT*mL(L1=x zCTw~d3kyNboA2j-N+_tiJWeukct$iK)2T=OK4-H9{VqM~nWnU4?E{)vUcKC?oLOzw zpDyMyDs@KeTEw~t=_hS<>Zfc^5t)AW-Nh8udEHqBpIl>-9GKaCDG}pG_fywLK2(y> z2k~(t;l=yYjWJh6oE&8T4CcX{->W#kCJk96&Wpm?khDY%L3D=hrYuZV0W=Y`35)|V zlAisXLo2==m}13g-MH#Ge=4mnKlf!TnS+k$;zbCW-(=Xf;w>{!2&eZ|1X9cD%@*E*Q;oMh>uQ}Z8?v3ZMj0sAoW zO}(=lu0Pl*IjUpv&(d_<{P~>ffeKESlePs*Oqafmlf@DDp$mg?D%dXhgw5Cz_~<4qxkL?4v)+bAe}Sq1~>YpC|5 z0%oDwKI|fqPf*f$kRUPs<21O9Ts^2v(N;HP% z*Mz=2>YAc1gxVs41)|jeD=N! 
z9XFczECF`Lw`~ps=LA}T9vLeJ!9pi9hT}r~%EEvJeE0*h{%$QnE(nEmDE?<{vbY1b zpw#eMSUe`g4`{VD+ZF9s#Lg+Ub=*$m13Ddj$)3iS7(*f1)}JsE|AsT$;Ggka!SL%C zOqB;-J0vpI;sB1p6X&Q`-z(9~ktM+;Z()POAsyms7#NGz@(pFv%GCWsCAa2mR*uhm zv1t+L@@%70|1l7M(wd``2_jF%VjEKt;b#@&QgfX}$bbuimEZ*Q8`B?8xH@894PN)# zFy476*2X`5&J4b_)H#v`wx74ZVO|o`UQhASs0bR=dKCXyWs!XPK70|B8i#ZjWA6$3 zD>ZDGTfEP3yOJvLRFZdjtnEmbbQaps^;c=h@V&F_nx)`R$%-It@k-o6rY+X5NPmfa zjbZmo-zmg6TaQ9FGM>bDFMRbamVA zo5Dc1DM*;y`>M@``1{rBOozn8-cGjnS5E`e#<* z+@h@~$-=+a&9^sbs{Mw-LIsd@k>=vR3ic%oIg-*5IF=<4$k#W}!=UNL^13HcYEmAp zU(#CLB;B?30X=PRe&=VOol$q15VWCRz7}B{%pV*ZzM}I$Zu64Mz|l_e=&w@lKBOxd zkUcrI3^UKRk1g~3^q`pP%7@juO5`thL>RrdXey?x>G{(6Yt{(QduPU##`Z5Y4qVWZ zrArhFJw5v`7tK_jKF@wr6?y>-S+i9l_8ibt@PKX<$P$&-{@70wMyQ$x^ z;B?C{Vmq|#__==LGw;+MOYef7@X(~^qFy_PS-a(5C0as=9lrS@XlU8ziZSK!m3O@| zQ6uVFSDL|>A(r}~P9$f>3R1Hqat!*Ze~-&76}iga?)~`@Dr9?TLv(S$UsXAm!!!B)DW4V1YTww~9w#t!yhHn~{wXH?TCyA`38N6ko?2V7D zy<6$q^{7sm*%Y_fY)7kff83~H40{ZG27M0_E5*SqHT=xD|I)AX=yIW~fxFg;po=RZR8LNU^M^%HgST%cv#` zs(>12V%GX545@cgq#@6M##z7VCA~`%Q^ii#9ud7qwWi$yFU{LENSEoBJJ_3J*U2d| zO<%e)h0}_I%qVDcBcGn$wugKaDZA!!$JlJ#niCL=#%ZSTSgX0wM3Sui)h~f5e2I+u zj8ZU0(NM~R8mEEdeKx-gZk>;RFY4~Y&uJ_E)NM^e{e={v>^o|ANm#;6kMLSy{rgO< zpRTqt2jP?IKX(x#D!D18GWpRj=tfCD!H`gBX+#XK^>k`o)97Meb{ottQ|JrAcuP+(E##;jv60r?LZN*K4?#8_rszd-?I+9 z8{5(Jg2#^cl&`}SUhTw$S!*zCcGg;qA3aN=?Nr|*cB?nTKK=F89^Q>dY@P>XeK@Ws zsw6Mi6i^kPuGN6tg4BArwcMpzam=gbXa6!OWecN-Yv!RuI5<4~N91MPF8_R1eTNO} zm!fw+bo`0J)2%j--z-c%qFYBc+ZF%s64PAO>~11o4^33ZH!59h>ojlt?%=Pz>PU0t zG{xy#Z%k!_uLgxsaPX#2HNRc*jb>L*9EofyRSEhiz{Byt{# zWO(p%#=5Q{+4wwm}O{7{~>fG&| zOq~ zO6ZXCWYx=H&@G#Yc*BjjyRvH5p2rt26z51xUL;Nd*$l~A!ks$``e+3KBVOToD9QO| z$*OvBF>L9OIz8%^#a?2`XDd8apK0t2)xTv*gwzT|cF!{RrZxNJZ+kv(wQELW_VjGi zM>=kVH%1ylHrRg77ysNM6{s#Ga!`%AuP6Kl*v&DmMUSlZr(Ll!u1 zdU`2&>l2@COg4R#JL;B=Lo#efPkisF*BlGRA}Jhn&M+5yI9B=fT|zT`07INM0;FcJ zoS`I)n0Iw2qD(gruZ}B8u2c&i5>JKN<0Ab>7lksnG4+h)b)j+2U2v01Tg+qTV$ zZD+@JQn6jJt(Whd*WSJD{e!*+`y6Y|KCJ2;BZ!>=>K4l63dU_~gyi)6Xy5!Uig-q` 
z!>S*z_>ac5Z02+ObKz`JQnvLYDc^Fz*x-PpBdq70zn{kiN1$c5cDhbEUddZ0?w1S5 z3gQhSqAD3b?iF!7CmD!XgfOv8FI%#qb2J!Izlb7s0<5#NouWbD(Ge*JN&>-;lJv9< zu2jM4b(bG^gmUNXmNmB0UK~GYxJCMV=iRe#o*@It=Exh*xo9BAC6)9S0CyDyD>#*Q zygwT{@k^HK>qO}J2dQ=qpMq(9u$;<2j+nToPP^R1!aGcG?V`>wJ9#T?1+c!SvY1N_ zvFOvHr=+~}hCIsiyG@0E{GIV#5;K?uQASnDBuYvbU_n7pw&r{9hdiJwz!0WPNFZ*| zBsOq_It3$dNPMYr+F>j9k%z^?%=}AZ>w-q9i4xNA#V570PYi)%8LEDLm|wwXdX$@^ z&VB4qD$?Zc_HBZS&Pn9x7%H6B3W@y)Rx3DXkttoZ-cK#_eRanJP$)FjoANm`$M)r3 zav(j;KC7j)S30}Z-GdRXgNss%X#o+Lbp^dOsc=>Jn~mBj4{N&(;n9)hOAA^@W8!F0 z2zNmdR?F8$`Kc_jg{LVYYh|^${9?4fm^Y9@Fw0)(Y33osiUl5tMz2iiEB-0Ri0x-M z+0;O`mt?oD@TwI7n06f|-RSkuU3FRD?ACu}$w<=IzPHi;fR08H)MYUiA}If`CcZ$& zbpkB46ZAx2ca3zrHdUndQ$cDuF%bYeK0Q8? zc8ER;iGhY)GSm(G3;1+kF&Go@j-UVI#O})ZMKVhSVF_8C%YuU5p=EO|EU`xWz z6VT?(^FH%LagzBp6ac(ais5j(#T?x?W2+hseGXC`9di9+#M89}*K{zEn4+lo!MvRo z`eLUwldXX5zP~d3k&~o244l+3CQkfhcXzrEsC|J3XxG+lqVatM(i$?f+R@wAl_{n- z%VJc%Sb_NojOeZ^r<2w@&Rqa{(~0<tzvjGdHN)*-Ee--Di@52@ia8X8rTvKT8{SVqM36v@QY9DIP(+KMPq*eS*+O=w>5Xw_s7DdtD z?f@z0)aq6EfCovF`r%qA_>8pLGS@fEaKRrfidFMBF$h>9whR(Pvrt&c^1F-Bg<~{b zYH$)5C-4;bgkbNL9VhcC%RLu*wOwh$LEu#{O~MLMKC{G7D>LnbHGRj5jcsF_cy zA>2LuSbI0^Pkh=4eF8PV*ysc4iNKjCwH)($y zGk)cKUHH|JZmQq#1Gdw=TVmT<-1kJkc&QwG{LXjqJXpH%lmF zjh;JrV|m^-%9zjIZ_6dJ9V7n6RxAF+aL{GZHX8x1-UP=dLvnt>aYs}Ld@tLCWs6V3 zqRS3(P3_DPW*|%zI=S7t7j3rUIq(ebJ$u#|H`kXCSPI>3&4l&!yb;+4@@ebLz$%tU z`r+-GWL~1>y{yldY;Kwfefb1#m{5vKB=%b&sugKqOt$6`Vo4ib{F;1%g*UqU0gj~+ z&0>|&?{7YaBGE{{@NOvyr(88wYHIb5?S)G?iuP6lrAtbm3}#Y`MJD&$pqH+JL%Txa zjrQ;&@S9fhe)%1)ZyZS0i>hUHo82B{d0=K;qHSLlrJlUMYpMa4^oIUr0o`#TYbgEs z$5v+#48L`smk)0FBHwOJ$yYmX%&2bWX&3(;W!T&$__=}rVGBW9m@kZs>)W6L5%{a> zNI$A}Ke6vFN*bFHIU_gZ@{!@7mPhf0!Ul8btHQd?)IHs~Zml@HN_5%PumY=4_+lvW z#clPwH3}Jjd{VVZ)HL9|%~j}DSg)1#x?_qIT5e#W&iHu@F zOdqp`%^#*P9o^b&c5>|SKtV9Ze-c$C=(Is}0_j1x8Yw6cFDQ5$jS?CEB*|P6u|{@8 z$Y_jym=fG1;fRy%bi#_|2md|5P%DyDZq=$J*kU^M~D8_%7y4c@ZsjeIzY zx<+>Ig=uSF?P(lsHZ<)`pRNpOQMYn?tLG8VP8eWIOpF+Nu6%{<+dw?8BF_j^e+8Q_mCUHcJzst`@#MgMXvT-TcnBMMqk`SvkLn(W#F)D#7h5*>ed 
z-_+lBcQ>}mjF^uXNvO5@F!EoGff}DtjeSpP#a65h^OKPBD%!HD7y0jHmj6(v^PM&A z+R{f_n@{rEAx+TKNl)Du>IGlV!Nt5;fKNssuydLd455{TDvr)N6aV`DAr6#Vft_qe zuZlTA1qjc!BUu>~{eA@=Sk*}=ok`2ch51XQ?$0)&{)Nq_HmB`4(YMWZgvmJXX}2Ezt-q(NRymu~N!!R*@?o}UQQ_#C?IY53icY6`4o ztvZ0l7kZh~uRq)^aOSpa3tS@-oU?X{#0MaPc)LzH3d%B94x@5SqnOg_A2?)T0ctNVglK$Tqc z$3zoIJTL^Ch-acSp+WVu#frYxeq}1I_{t`;t~P~IUHt?qrptumEgZ7+RhD`Q;ue3h z*f_jQuzZt3vs<|Y?D!(C@I$@Rk4_|d&(xv;iR{<>UoRqDjiB{ZB?}%?`Oe((k63qY znw$*F)E-T-vVRv$z~ucV$&fv@=P4%Kp@&Y@CenpLzDzdbcDVcpqqD49WMO|L&^XK2 z6LI%%LE^rfe5YU{h@G=Wg1GswP{6h=4{F#$*k*n7>V%Ie&gx#U45e_6#e(KBt+rH69kP=3m*-Vbk>@x&aP*oq>IHqIXXVYzDgYnt zSB#yJfFk+VgU-}S*li&$t3R923Cor}q0kHijvw(}BFs*%M-Y?85yvw2_|MUV0?~y5~rDS3ZM;p+%}?~jZ-;0ND{h?LrY2OioD(JsCCb!ZDpP?xJ@D#vc-TbdF^*?EbfJv0aDRQbz(LsuJ?Jx+`9Z`Q`2A{n0qcZIy&`tud%> z$@HeL$i|jrUQ*R|QP-MdN>Os=(5O~B=_FKBR@7glp?=EZS*Qh*gX0e=ke3a3Qn4Kp zmbeg7X*V2^FJ79hlZ+?phQDR!SKZK@UK&i>4}-;R=u1-EKZmIH^YzsH%qUQ7+_@Ol zQ0zPCOVe*ay7xUhwE;5*~0(-mvOtQ050zYhHcsh9d* z7iF#Fu{$IfzS~0&KM<~9N-F2O@#G@duZ4L1g|g!=r|et9G+_;Nw=ve?*f*s;2Esz> zIEGIr^55}CV)p{!OEgP7MqIJ`K8^Rz1Ea$5ClXA zbR+othst1gunb)4pW|Fvet1%xdOG$F(N!=C^hT^VP64}3DUu)eoW)!Lc92SyY}~$6 z>JZm`4ri+%TgA}a(cA8LMmiF2ru1Dk1w^ng=SlliAGUaMQSxSXuX+|@BrZHw2z_&%Ufg){>pegxzACa5JrE)fS$6b$^ zZ0q{A1GY;f%z(2bt9uJ+8dbHzU!N4(s%j7E{nT5h+6oxdurBoAVl?b2zvchekh}@t zy(&%FZ42#28?ov=`a5JU8RXw%juM}Nlc)m%8^%yVA5>%1U**E8qz{hXL%*|EJ5U@yCXfqvLm{g9 zCDtUn)V<*^H3zH|{?W5AE`4#t^H)-(ky)SQ_;%{1V=Buu!+{`Z)l6qi%Qpjbyu;c; z;pa!1+V_&^uor~Q*wEFKIsrub(2G|SiMqELFnF=mXl{p0E=0)CLFG|!GXy%N*pUUO zyN{`~=?9(PWfHm6ZBn6n57c4ZykC!@2MgEckM)V<%tM(Nxhw%>6%8_gVtp9~Nnj_C)(@$|aOr@dpYX(wsei_j`*<903KA+hb106LEKT>dNqdV4L_mws%$ zfxsz~&)gZ?>mlN^Tf?anW4;#l9SL3=gkK063Y~7Lc5>eJF`LEq05Xdp|3Yt_1+$)5ICy&ytLdv=xoAboMLRpKo327{j0 zC%-%DRaW););hBC!M$miVRZdwc?sO&61cXAnm2uX;>j8N;deg9ZJovY8pMkVV0h| zVFvANrZ^hbE^JVS`dH8X02sh&$RPJv1y;H!VH`{6wUv-*hqiuE6uPrz+As=!N6+$^ zDQ|S5ERK9+Ba(2l?7il`o^axL-;1yX;Lg=rUy*riwFsi^@Q^@6IsK@U_duL7(PP`; zV-Le>LY>DZB>Vej8%NPpyXRvF6lllIJ&RQ1fMe(Ci$r+M-lfw%=>aI;hh}weX*AK{ 
zLbiH^^of3o-Tu0egPs#3p$6?bR7o)3GF?l==|WM3%|Pq^a#td6t#73tf5G;``dFvp zW?%o>Z4^|nCu@MX&CL7_AvOYKCQm61x3L-BgHy8d?CuRP*}WM*LFOXpM(DQ~7VjSF zW7JT{Q^0Uu^q3u-kBu3qFp3S_uX_T=?t(nhz_W#Tpn!ru<=K7U7c*#oQH)mD%Cic1 z`6NHAxmtiy#fzxN7fklb<@5dQCbz3LS|KMN`S1fuDj;YV`F9< z(iU}WuFQxk^;2}tY|w1MfKKUEXHc1v5o`Q~Y1Y;ec=N*Z)$x~|N?U9hQ@uXphfB&5 zF^18jX53DQa$m1MmPCiqi?UG%s9!?DNy_(`R5p0Yd`n1xApb>z{$64!U*} zf+jKsCiIZ|m~o8pt*JrhkXl+MNEfwJSl~sK&&AK?!RX<>gH=kn##Oc>^ARa~20v-| zqTD;c9sIHuo-O^WS;k(^FlaCyR%9j6;hjY|EmNNv;thKrkATS^jLJ~ypS~UB_*Wpc zVbGo+#vKlUd|&&F0dO1iN^UIC{r2O$}P4RYor*fprpxqOW9#sQ7=rl#a?vp)|NE9;u<|m`5 z*x#zbWOI?stA~c0w()xYul6S|K8$-oY-HgX0S@GK*1Tx=EsASj= zUi@Xk^nyOasDYrj>}WGE|3nc(I%%$|G z9}s1AMR0m%6i6yubD3W1BDfqUxpLN%6aDb#YHGjmHJ}+qUUsO}#whhb$El zM)KH9o}A}XjFV5&{gMS*XCh4j+#TD$N_;&5*PWvIFHdnAYq32myyag*tnO`N2aMqp zy#Ll?8k?DFaB}`#fQ#MExJ%VBikw;qr(Z4LNuO$uSQr3XjSK&A&kb^<%Z!h3w}vc_ zd!3sE6u6tIKZja*7N^DytXI8MQKx3o%<>#5@v_py=XOWIRlLXF88iC*N_96P#(r_l zJxsNX_)s3V8f&+hk8N*U{5lpaXdtP{M0F+Uo2xHp-JOjx#mbh@o9m;95Q+sJaZr+6 z^QWv`b-H*Kd019~Sj22udwy--TqgmBl2e4m$e6aZv6FM1+%lkjyij&*m~(@=z2*l0 zF-sY{5xyFWa>-I31h9tA?Yi^YFmG$IIM-aP^c%CV<*xTz0(f2j^r3kJ){1mq_00+o zgbo+df1w(N_)LvhzwzA4OJ6Yfw3@0F@|8|$OM{$dWZ${oIj-``8XhD++L83eI_w4w zsjKem$!|R@p=MFi>u*$Pe0!M%?y%9c~CRBUP`9>%~h^l@w6*C1F0UZ#4e?ZVoXBZho%1! 
z9RX9>L;V{J0DgDxKb30-=~Y~q_H9tRhBZ;zR~q8+akwf{EtR6hgs}Eoiv_(gMzK`_ z&UUqB0-g!5?{^%F5DpCqNfcOJceO``G+4Sw%4-~kuT*`CsN}{m`z^D=193w&Vb8_c zg7(l5y-2%ej62L<($O{_mTphB?YgjM3h@?vktJlBORVaE)1>tFWn zj37zCuw&xudW%`8Y<(I_01sv80UwYHJA81FE28*-rKIV2sd1k0@FdZ5MqBK$gY!Li zcMM6(0Jrad@B2H=UAc`}EQcu3mIBUfz2Qucn5%;X{bEQKJX-JGXaLgW@)s?Y$o z-trzt!JM7+k$AgJ6bk-sG9e?WT^a>HsQYo~^~55qb1o^*9JjdttrNJoAV5{g`fN{(l0gm&5Z7JiM?cqv!)NtH}F_KC^ z;bPhthyQ&qE7N+oxYwcd?;nG`pQ+gqubndC?XyEM+mf`>vahlJzRP;@zQ_8A#I>Ck zaCbJb9TS!KC{_C^_Opu+e(n27h$TN?%?)jv6Mp1lOyoLQWPDny;fAuh=}m`L+U`(g zpNQPB0&5l^+L3VezQ3S#P5h~$pFQV);YWn54398x%X>E2~r;(cw%m zo2nE&N=EhdbGr}Y5J1`QyS@(zvk``w5W!QIpD_pFTl9?OV^SHcjs!M+4j|Yf)MCR+ z`!em=MBaB;-v_1MQTRG8vxCa_bt0cO7fcaH2(v(EPI=Qq47;}-#=Lw{kN}WI%VahPM7`AXO{({vEmPQ# zxP7^!y=eyAZokg4V}JjUZ*3>jDZBm&@D07{Z)H>VZvfK!3KSFZrflTL9o-(AMmEz7 zb&h1)%X`cRPqXT2hIixTT5enRF1H?7rPm;*yu6{XApUe1ghZF zO5@$80uxk55nVxT*WM7Zb)1Am>juLZ+Z1-)4Bz26Optm*XL?&@DC&6A#9`SSv(_R# zoqR#BU%Q?SnUims9y1;Ba?U<#rKi3gF7PpHWq@iJ#ht?t$=0uZ5YEjN9@csn6|e>! 
zjgZK>2AY0=t*K$UJ9`)58oGGUr~WszWkE;IeYnW4{PaD492__JJDW*2Omh!K(5Q|g z61)9GB>QIsgV$}lw(AFn2vpTuBXn%)cO)_90QEd{<|6=F*=ijbtaS-XU4HEX?_o1; zu>Za?>KY5E-CKN@ZWj^s6_S_l@8rSt+)@REP^C|kNIL;9`H+=ksP{9avA=FQ`vL_O zn&hQ8s@Ie`U1cP;XLuv7-kZ_HhxPYyaNzR8RXqa^wV@&I)7EC=mSxb$hVxs`3^ z^=B{^PeQ?%G=)T;oUVvR-MKER^E)xd8uN zxV)_Y268w+A`EmR+~b^&k;0L?%dXIXU?ecBQV4wf5NeAcE12!SXSaqM7|u_GqRt&H zl!y2HR9IKT!E4UgfQwNhba)hKI+Es?e7f*knPF5YQ_2{IW{8S^ED%e5CKnil_Ee}- zvp|$RZd0p*_^g`~7#%pDET7Pu-|@P)SdE<;2@RV-M8GzZJ-tl*w>c}x35zKuLA3Wl zZCD)=SDexv5 z8<2kU+9!oD267wC1=NXo^A8ZV9scEE^ww6*<~3O;FTo@!VlZZ)_F?qL!H8= z(i6O)%oL8|dX@couVavTv8eX^Um(OkQAa2yekx)y-bbRASlTMw6{<(6uCWu>UjOt*j0RQUj+Z4V5Q=0C(`iz9c2Xuf6p#A9PJ`r z`WYaC;mCav3<=J*+h<}yVf_!t*v5etBXn~hkhnSvV2>VIdGT;37A+-p<%4u5>js@8 zo{u;^7Mx8%6v$)Z2Onf|tr=T=Q{^OK2S>!cBed&kREp?&(>`}puud9=#qWh~l@PR1 zdXnQyoh#uxI_D#E@aiHox48@;b;-Q-;M&If_;R6{NI4oO*l>;Ow)X0K*X~+hWpzwx5xIUr5vKoCts`^@}wmp8OAJDkx}2!IU@w6`LS{bN{N549Y;u3=PkpMuHJE zmZ?O;HLZ9R=_sq=^Ea*v1tRy`GHo$h50=y}I;_?{G_~Sea`VcNBU%v*$V;p!0TK>} zS*AXn_~F$RtaedzqUTBC8-W5&K)ilH*A%!dn1~?@hJ6dNWX|^uH(;yZzM}3STI_nJ z%co0ul2WZ5>PGm$8q~&0vcl|Q$B_Zd*%3Hxsr}~&wQD%x1f``u?jTSWh619XBp%uu zVZv0|To*8#WZ(xFe(<1sCGO96M`P8QANEcx%z~k(6mcc8 z(>jv)A3?Hj=13BkO|RO9iGpXP5d64r#$D65xHTD>*f1a{Q*#Xb(}gAplf4o&IM;M4 zuo%8-Po=?x(<}T%f1f#|ukbYlxzsNeUqzYQtaLq>0f90f!BIO3(c8o}fWivod+#uR zcs{8KKiavZ0g!Vg=<_dmx70MvUzE67GV`e^uHvI#N47HQ-~isqLpN6RJ1%{K&dd$% zLPH(0GQtNgec?%p&qng_5G!k_10on2AvAk*oJ4nD@IwDapHYhNrl*Gl`Fb_cfg#s+ zKb>(3w=weN??)_-7uv2{-n6TqVn-2YONh$+LWDEAE8tn4nq%a( zR{FxZJ&*UF9^Ia2&ODM7`$ybZI1`M*fcxD}>X-YgoQnu~AU&a(Q6?5Giy+xSD;_?0 z2qrE*i0~Q)9FsT%rp5O&HMW@4Ms`UfUfGGJK_JPGZEB6JRv5!-c}?m=Y_Efq*Zt2^ zQ=-cwHR_Fkbbde@&OeR(l6`as&@0%BI(xc-`uzyhG=!lKX`d49-&Bo^Q^h?%pDV;* znm&HuT;pUuWZHbpYaq3}3BktgwxobtiwLt4swx;jr^tC5Cz*%EjZ2!Fmh6wY-g>JylO#9;pjd%LlKP!Z4d7 zZ)yO-61ZZ*bW9X}mVC?Ej8)A*(#FL(;DiXQfyMl#df9sf{w?rdbCXmVXS4{Ar+zC& z7e6Cw_9$Ztzf~FTWBAIscbX|hiBJ)X$qG`^*g<@%eRwcBy}%?-DRQVM94>6SkD-y& zS|&$)8P6fMSe-I0@L5;Ypm&<-*pm-+6|(a2!dtkvlq 
zqxF$~Rl7!h_eP|Yk&$$&da*GHpHPdl9>$^du`Em#R)a^*JSQX#>c#sn6o~r}vD^us zri^(jx0*=}OW~8BMkugmo1(JRA(&hC^}!mdJ9vxU3T_{!>*Nf02ryX1#db21Ydq4+ z?D>(v-22cdn-=N&O*v(s_o{YK1Eh2qyc<`A!QFI&i9vhTsA9#c~WBz2YSUj?_48|zyek`ecN3ihGaAZ+6?Fk=c^-;xziTwOv>?H0d0lxqD3lSc4w=!r$EfjnRDjHG1Czb+* z`$?o#l*`JNDl>PN+YUynh0N%(!nmrwt(YVHl4tav9bI@XHiR8_K9^?bAj8=fW$`8=s@Wc8 zC@^P`-dQln`t|kO|F?*#fA`7p2Y;?y;#fDM{kGTYlJt=CRWS@F-Bcy6?@ zp!QeOM-0Y#URzjJXv0s^XDuXF%%7&b>?;|UZSr*vRryCH?!DhNY`-ZVbCAD2r?C|2 zupPJbPN@T?!e_^&{RS0On?yoobjYOJkAiA9dsaATZM*Zcag# ziuk6+K{UlGu!(58SO`*_BY%^*A%{pSHxc}Mq5DIfCF--%|g}ekK_;( zwbr?sj;+if%8`q&k~x*shc!8s(Z>ujIJLnqSQgSP?eJMIM`Q@P7omF0o9=azT*c-q zi?WBqjCb9IQ$y16N0#N;MVK{=91N=tLCG=^yah?^sI$Y`M89<|-bA(g!Ib~(;@$kx z`YR)~DC0Eqng{;!^9{Nnx9qQ7L=iN7ve_din2_DK*B-qOy#+4{?sJsB^U-WoMV36w zWXt}GzHN@+?sR<^)^jDE&ASrO_r&5vE7Q(=Qz&5}KR(RJIwcvouMa9QEU_#r_!#z`D%huggV>FSvCA&1}R6M*%V;J6tipIjn5H;$;9bTMok&S}}Lx+bvEs))kb= zKxG063a*I~OH%HtyHI$b7a)@Qvx;U*PWvx5q^a zSlSq;a*MJa{Ky)0sD-j-lAu97<@d#c+_-lJnhXf?FrP7-U6d4|_Bk&6Mcb#zq52&u z1)Li_YnX ztA0Tydi8IMzyn(ius*3TpR`I2ym;OONsyU8zSov)dx6%=S|7L9v$o{ZbMDI&egjzq z1o*%VK{R+u!W3Vra4Eq~HMQVdX^ut*rZfc&p9$ycMeX z{HJZ(jyv({>rCmG(qAA!*1Gh}se?nihcXB3+%9u`I79&i^3r4#v*4bP z^#60!eW=z8QI=)B*{q3>5@a>Ub8%_0odvMK1yiy%(Gg~VJJi^r#EQ zhv2^B#wzdktM5zg!*L|b!XM+ms!DQ{pf($eNmH{Ea#@uTbkV*^Npq-5VB>dOCk`mtwW;`LDzf^xMah zXl7UHGJw}3;7|e6y2L$Ym_P!!lZAVo9a#S)FMv9625bb>sTDOaB_vNkj{N@tdEv=5 zrG5TjI~8%p9s*CZ6E4^1wUHp_ZbLR+m2rMgG(Egj~`U! 
zLdCvR%XLk_y9w2*<7S_ne{KE_=q!Z3yt2RDaI&ri1bV}gcD0f#`yKGqAV!J|VJrJ51lFNFg zBrgcY)AP!Y&NG|L>@IW~z+@{*73^}stdi%HfN^BPTNuZP-PbKaMd$WYB!ngT~_$6#u<-uLPMV{HnWwK=Fo{K~fyHYVC3 zf^N7XjvD8PPsnR$!Sxo6=>Gvqv@~z-W8!1*k-kH$?%bD&Ru~-sv`CGI?dT6s-aeIj zLx1va%xoKc#M3{OCP!ALg*Er z+y-IHzn#jaLi@QuhzZLPyZC%$_i?6lSt4?dzJBtWU1d{1z`5_8SvC@KA(GW@&r|07 ze{s3*hQ9^Sg%vII@G+8_Un0q$t7o&IH%VGzx8}(7 z*6+uTK!xFY=cL0FSfKU6C+)?T9zJe|(V*yl%tN3yMpP7~wQ}e!TcjaM$RI?LFh*QF zLc2?o%?E3+>eX_87DTF9aPBG;3J z*masK;_ng5+|a40de+eC?&#+nV)#{J8GZ7e>110zAWhm(TJ&3rYMz4v<~A_#tWt%g<+zbim znEo{^b!WWdb47A8;oqR{&*nT22vJIQZ10l zn0XY=T)<|q9Z*jFQGcwv>L%v!n6GR0`i5zbf|l9;yZwi2JCpj&k93*286f&Ke4tQ? zdF`pow!>D#jZ}=OC=Muso&0t>+G+;``?@Yk4xzGT)pMe%+d0F@R5OuDa5nDJl7lAU zL+MJPK(53zH=}3A7+MXi-Faf>%~2L;0YQr zStpKqHG6!_8HIU1R6E!vt;iu;&}{izKN8U%d|Oi$5RuPva5enVleS$32H=!Blqh!txRTJBq`eu#UsI;ld)?fYsJrxJy}SB(ruj?R5JI@LvLr$)gPaRN=#CGR6|J>Jc4K2 zmCn?@9IBefoFxF4H3Ff1_~NL;^p%2eQK3MBwih@{L|amOf4tHbAwL=hRh?Dons2d? z2BkOFuJZ?HkSDH^dUo|Xkn|gO=PHG#D?bBR%qWqjlTsw1GrKo6U z=v!%d5;S@&my~YWjiahHRVQ!0imnV;Zj*V#sCcR%sNYj|L*LM^b@Xvjz2p|r z#2rl30;iZ$|2NQn_qhSe>CNzri zwNYsEuvx&|GT}`^r9$9H3b(>mjI1EyA*#D;tm`mZI|70o7HrDKb8x?Blsjgzd_McU z6J)g3t(%zh8q)>zr-QjR{VAPQ(<-<{C%H+o96HKIvx3yCgO;Q7qn2KqF6=E?LKPJZ zILQK<^ay2~(5+Q#gvA{jU1IdZ)9ZAf>M8>GFhN%`kl_Ue|)0S(BEd25)u1 z3wyKsheAg)irEWo)CA^NfNp0nLBi8Q=-9+e_pECR$96UV&cW?&3eKFn{*(bTQH8|d zYL;?=d~=mBJu$RJ_1SdOyh#w=-O8#yQ?E7}D$=u6$8T(m5qLGHI0$lnVS^o423UlN z|8^X4V&(IZB%SSF5`ON1t-Jd*E}k^F`@X~l`mx%s3x2wmpKA*;7oSXe3bdn2T`p^s z;&^wQW$TW@f#WkJ<5FX;;T|9OPf_$jJ8q;QvdLO)9?rmmpqMPUl$|!?qNe(2+SHVm zUTO1MX{B7Q5sxd@eM37?*z{aJQLDY9RdUhZ%)fgbhbLN!#2k-J1)H)B{#4ub!XfWI z%`u^u+Q2=vquN8euW(BxY)jU`I?n$!!l<}dEf?ldEM+_?ry6k){hSAmY3ey52}~(z z-1(X`6$!Cu<;qw9J{fMDIF0BSoRVx?1P*Wvib$b{RX#20pOc2`745KU*6rVoNK2B}6J1my1;!9~2__=UQ4JVVTDi&nx| zN7hsGeG@5TE5w~R*<_y&Zv<6rOtVtwH<>Kjk4!}_$X@^a^3?VfB22h?P3^D+cXb$h zX3fg}nu)$KikYK5ZJFCU^2ch*T{u&Jr8jmps_<%< zhrSnDvGC%rD_Vb$v*zX=QEK| zud6;E-}tE-oZzl$gqsYepQIOwD|K1BX}jitC0=oW?Q}17=2wasguOg`ERZ7)mIUbkD;k3iHI%_@krr%G0?!gzn_v 
zR5mX-k0M#{Z*&j1Vr!R+{ui5+qG|PM%fo}e($-%ZW@wO`RzJ^w+w(L< zX~(wJXS_G^cZ@ZsJB(zvx@Ac+Gl%!+oXY-whP$rv9uS6c6ju*qd1H^YEZB)uc) z!PCr5M-!CYUBeJ9!%qai(e!yc{_=kH1C^JkptHgFflO|X{1%;cRA>ZNG4GE5z?5*0RQ$oSGo(d#X!8G%_03V4 zZtcU>G-2{&+qON~wr$&!J=w0w_Qc8NWZN~_^*z-&=l!kqt#$wN+|R!EW$bIqL^xoS zUODwc+f=4hr*=N&QZ*Y?YJfm)sx#nJO>uOM{IUkkBLW%Q3hQ9tV`1&@tBM8H2IKEz zbwH?kns5F#c#_Ps6gy?c=#S8x+%}s8;=`rJTFuMrlt>9>$5D?EJRg`~eoY*kxp*)P zf6f&kV*yX~EQV89E@sl_@1Z)<#MVg8&j=|+wCx6jE;`=Lc9n%L8jB8CIj%bUFJ9}@ z!~;{(`%>^xK1P9fq4KJxI^?A(h2Wr`>tCIUbxUm!g;28_l?p*Qnopz*&ezEOq^(g` zxGPz9O4}#N8T>>e@bJ}3%L3gfAA|!eDaqYZ5Yy3DvmS->nb?Gzx zGUWiT-2cOSfO&fX3^ctI!Dht(UN6+Z_9K3`0~UH*$kYtha9-;_1R|cwfM_9)h)^|1 zQ#n`*;!8O>jG0wPo&42Bb2trZ#5OF(>F2K!x|1@{FB#BVWeIQ1KSO z-^=T|{EoYZLPEa=ofGYbds(0Q1!ahae?8wN;C`PR$I{o1l)}mzA!*;L7)Y!pm}pu4pGT+P7LGr$huO}Pa-u7mrIrs4vun7C8Lif zu4cZJ$AVDa9g)HEO{N7*mLXTNeds6o7drtOhO&1O{Ct&Z=kY3Cxm_>eAWS<{9KTB^4gndy+PRV}@j{|3V zLCV{Ii#{bO4@-h?M&7yUl>SWL?qtW-%2`7+5@3eDO^dR8DK=3`E(&P#nRKf5x+nT$ zh1q1`1E`|K*TR*w8dSRIlDD723m_@$Y~m`Hkv+H540=-}T!#9`8h(M6u@4H;EE_VyInyq^9h$XvZpoY+XMT0azIDXc;k{!* zML74&w3^zsuL8ct=^KX0zC9S-v4m@z-ZMZ7vDRa3RW6f<2@d^=Nq3!F>Sdx?eCQ0B z&-rb9JoCG&O(KAi!X1S+lA)H=u`4dhVxI!9t|H4Z*%5-ZW!s(E!{+ zwBLq96+`Ds(0PR?wkC^MdFxnJ)(rNQDHVt40MmT)IukbWcQW8Ikjt7WA=WcJxf>r} zQ_FRmq*!*SHu&=)$1i$d&gkLDN)SQKpd7xy$uHE}+6=CI(DiF=Nud+n6nyx z8_Iv(bOAQMh$N3Gx3Fb>cA7$)vZ#DGghX8tW+`K3&6~~8q=ae1Av>ID{0f1x8N`BQ ziu{&={Ly7)hEdu59y70gyZzOCjPu5(7WC9Hb!6wO(JSmDFmHPT!koLGIzMlBap38c z4K=wcGzM8G!Y&RF@))sre;4K&I<=g8B>#*7NA{P{WSiQ0X+^rnez3zoPijIVxB8J% zmkiPlGt-SE?X!I;j=oPy;~|Swn#Wb-9wh2eX?z$)m*Kc}lQje1izZiZOti=_8v*C= z*LRl8iiKaAx7`v4R*oC}D%JEx(YPx)ZN)_6M9d(neN{b&6I_J>h_M8y!uia12(VGP zO!&weej7BbEodaH{Q^4(V#TZ;RP>%nqMY9Zcn$i2 zMNr-fKCkC#TMbElw4{olcj5*x9UfB-C3!jeW$NCqS2<8bt;4`Z&agTUqm}LJY15U1 zKz;L{AHD_h4y4!qIVDEE=tN@b1(g-8n|#L=9{mixWkO~NW4|sG&+(kVsSrdg#1fVZ z5&gLPlo1Siebg7al8SfRzNW9hfxuti`w1RU^(F3^s|S@5t;M*L1*&C~++(2ErBb;h*~py` 
z1rI@fHSUiVa>qCom!3P+cINC=vf_}D;Un6l2oaXxJWn{ZYFE!jqs8=qOn@)`CE|PfU+^Ic3+qi2Sp4q0de$I)g%XB<7pnQB1s29Z~eD z2Qq#E?bc&jycjvz+Z3^z_+Gx6;!kQHd2kdzX+=(OAZH84HX;8>TVR#m#+xX=*L-ObDca=h@ z0u&Ms;9ZF7UEI=^@@ugs3v#VepYS?+m5eb52N~r1UhA~u+!g_ms=yQ{MT&ei^LDj| zj%ob`bdwZWM-W1?Rz-*f=RfR4Mpl`T9p=y$TkUhDMc2coGk}xcPzpYLV1G+ZNKK{6 z*^s6aIuegavM%CHTB?AGEWa#<*7+z6xkG94hLL{CM0Cids1K1U4O4u@V1X63(_Sv* z->sK^d4e-~4*FgFQ074m%>9`bhfqRaYZwlP0t5ZA+b3A zu|9QS+Y=gf8@mJp^7=px!#1g|I!0@)bf={J=Ar8x0SBHiD}?J+T^*$JLkO76$()*4 zEt_v`9VgWUUeQxtCkGmZd?xh<|^$jy&6amkV4%LSpfj4GZL9OQ70nHUt zJurAL*9SeHT_asbA>&7v*i?yIu}wRKE%5g2&UL=P(!{Zx@wTX;L?}Gae*cNuw-3~@ z@cC)kS4nUO*uw@0?kmOGPnGjAyyZ134yl>^ zUS=yw%c6D0I*DDh?D18!T9eG8tpZSOR30PWKqz+72yriaCkD4f;n055KJPOn8MEqX zeJ(iy>X&c!!W5c+#s+JxRq`n#*M;nMHu#AtDxCbG%`2f?z?+|9j25p+SGQ3FuW&250(Z)G z7CgR>MW~3=g+)YuVJz__@oJ{bHsCteKXSaYGoRXJSdT^G-i(HNyW(pt(gvA`c zhtLlElJ-MU@Px|xn6b)qQ3)IMaNvp=dOIR!WD~+T1(#9Qf$9KlO9Y&lB|@m&Fs%bJ zAoIymY}DdpstI~&D}y$))z3iCnA10(8|+kC=@CPPk|cr`?$2rk@w6#NX$W_tv;Mws+_1*=lI%D^tubl%gd7)l1eAnrcS8Yh zL+J0LIq68ax=;$J5$e@O@*ln<{8l84&tKbpsI2K*ZR4kPv?xY3hOLHaYKMB*&!e7J zH#u_ZCd^5B5bZ112bM0+d5NSdnpfomO3L!fS%hc#;srg|%i10N<+a=h4T*kP@x#6*+J%MjCAikX#X=fWl#F}*X(_lE*& z$!L`+^pt&3xYZ>5`h#E?n?1Jin*&q)aA54&y)49LRJSH|4vii9?e(zeCqG$dJU5>A z1eOdNo9T~pRB`_es{a6i=Iwg|mc&-ViSB<a$h zq638(_<}?Q>i|W|&Chf^B}{^0oB}Fh0ubQ^4pIztM@$MkiKUv3G*DK`=iq=|=R)e( zxfQ8qrf~s<`Vx}~gV2NTDNw}bPmP;6%7ML~P7UZmrG6qV8Y(+AlS?P^Ey<91NMf{u z^($Ez*8~(8r)qTf{g7iW8|3u5v`KZRqDQpBl@C&b3L4n)BARv}fE6^?7G%I1k~01_ z?`hC{{hpu>t1@z{Pmwb=Oc9kHFHl-~BacqxY!>d0Oxe7U=UMYr|EkR>7#v5ADkzii*<0{-Jjyi^NjX|+dPY4rMaFQ%Un`yc>AIoQo26spJMfmFBW_D;_!3_J5zccK z3)+YByBu2$-WJb4Y%J8!DC`Pzlzvxy`O;{HirhiXM#H2N2sNh1n+udZvD_{y_`6oe zp{0+MFayUS<6DAH?c*&KxKgrlo=a2L96v?s^gJ8ccC1ZQT8_zDeygZc>2PL6R0CSj zbZ3Fkw;PJCA#)r?z0>&iYab|geLHeq*Djn-&<-fnb_@bMXBbJDI!1nBOpB5 z^?g60272ZW3G+csxW3E}C+8z6A)#wnNyQ@0I@NEILNZ5&leAHg<@Q2nC1VKVdHM1L z=tA)E+Y)oZ!l0kqN8+TzbOTE)qtU2BK@8Fx5VXD>(>J6B7hQH>=(RxqVzW}7$wjLc 
zsVlI>f;o@sKQF9jq6NxYV4#A@>ID1yrFdAy?P#^#qZ{LE_`krEx*FFCgDSkm1pVZqf}g;M`c750Pe!mlr`R>z zN*|=T^pU2><}X$Ys(fAw4JYB&z)j%PfQYLI1yjnlY#9YqmU{mJDi44WS0fpvi~AI^ zNb{*e1MhH&PS3DubekFap+`YhavoVBO4+*8fk%7*O|0n-zF=;8&wh!tX6{hAWdL{r z8P>z%>~7_i>tA&XQTwsfRoU>#i8J7x1b=CNHzmLsaJVos3nXNOYNPQzoU~x24;O4H zhZU@e&k-l|sN1o!w({vHNr$ETNSO9{Wx9V|@YIYq(}i*~$V~024rqhPt&<7(`qO$n(mRZo#yw?wp0JkI7XF8TVaA2fYKdU*?3>g9L0c)_8 zSl>$UHfN|=Oslfz9U5z)$0xM-yKSKUw=-knCOEZNMQg>0uEZ{A+D$ zikx0W?9aW;gI-4m^Vj$sOJny!5$z;&^z`Z zSa;@Cc8Ig~mY%oOa-lp*?AA-;Hf)!e7ZWrTvnu-MgF5m*kxHglaaWki3{Ce4rsiBqsJd+?D z)-3u5Xaqs|h;ToaUe)jL0g~Rd5?bxNRHrYj7I=kbPh(E!!ayTR52(~BL@{fnTu3t- zrJ^#1r%e972;3!zdb2+xv4##m)okuxy}gHAj5|pavsC zQf0x@A_xT)*zT@PMZt*s@h6FTi5sqW_~awF6{6T!sO7T@#<3V^cqmjbmHN_TL-kIi zi!yPht&w%m!3$6*#~xhLDDC13&z?GW&ZnlbW;nhFrd??4G+cG7Yuz4IrFgGI9#Ri= zIROqn2(DJBAFOpoY?#Jm%nHFfmdm-;Z#ZCHg7lIm&wjp$ZPZE%(#>ze?LwgW&Y|C zHPpBZf0R%Hr-EX?PftB=Ys+ZXjiGbRk(u_*mONG@@i#NvlJv%{p#qUXT+w5|0`wSqB2k||PsfL!QH^1nD?CR4ZglG5%12jp0E;e_QBCO(c zu^6Z}IT=9@F}nKnJ=LQ{Lso%5$hS?Vhpf?VT>TLm$+FPn`mKUY=_NTv>h+AQtdr6m zZl!`2*t$~?s;Yb5abR&)*I0-qnEl?WYP4G~-c6sOW_kCpoB@$=+E9CkeyeO^J1+I{ zHft&k>4_QXYep%$IAvzx%9-bpPaQ*Ig-3a!aetA_Ey5`wnWA909od1Cb>q*9DZ`kwxuiYzu&JmCte0Vz9SYO9T33n$HE;-{ol z1TQBuAaJJ%WDw|X{G2_X>8XS`E_-SMij$+y@RPWrFnauROx8U5hqPA9x2rtm8x`9K zGbeFyz{ucy9dkm?^NMX=&e>?;S0>cz%ETHM2VmoxFFSOzI2H%aKVrpxYxbb)ERY-> zOJ=qUXEI+)X_w~FLXz}gQ3B;G=HP00+%Cz}5cMg* ziz`KnM(&TT`+$y!>+c;&Jo2TgEWNxYLxc{327<&d3n~DIE(iK@t~_42>1z)s{N7#OIyM)y7nR z12Nhf^~$QIM@OC8-Soyq!P$GRNR9f07BW5~y`mo~ z@|I$g9YjEBDf|#(y;)@K$anz5w#IOd{nA){LYinG`MDoE)kCtK_VgzsO#|bfJC=5` z9};>nb4`TL)V!h_8q}0Cfvqm1yMOKWMBAOHx_{Q+%bJb3_TJpq*qIIWEPBAX7X;1Y z$ejItbzM61U6Q5sIwBOCCcokTbxeu+C66|fcR0wXEPMuDSE7Rw+yR-Xc#%Z4Ih!YuP;OyLeOlA^+B0*sQm|ag!q6 z$wpCrQjpwh;T`QfJ1I$NG|kAm8Wetc5i!KDlH#T^Y_9DR^QF-kxO%;6;#e){(o-La-j*xx4;RVTWTX084g^Z4LE$PUL1`%oRnqDrnSu zL=c+cz}?dv<>DDNuW-K#{=#D6a)DCn*MxZVx)H$NBOS+zG90Z{-x|Gh-RN*$f%+6o z_Ec&#YOVqnsBwt`B<*`?3?cj~v5M(>cgeYzI{pTdR1A6%YK|xV;J>`FEAC0 
zQkJ3l=$OmrmGXz2h-S!;ZO3_KM)beKQonzevdU%w&Iqh;V^Nh@JaVl(DvAu7FAzlY z`t)_rv^4s%Dx&{fXMzC?Ftv>an1uwaJ8?3ZLJKNS#-n7t4zddA}gmQ>MP!c6g=ZSI zFIjs`-0gpmE*w71XpTHKxgEWiItM;NbDNmpi%Z{&42h+(vwc(}gSwCn$HUVh$}75# zlmO{`@xx`|$|~UgV$R2%=O$3jh?xOtP4idb>|wK`rqZ&n$TXvv#z^VLEnRZCf)X${ z3gU%3=rv|z(-4!9+>X3a4UqDBjHkI?qj}|yZCP%+#E-cazB9?*s(F?j){1#36G8W? zgL2qvM)O3K^+eGNkkn(LrTghNL_qA$fI^XyuiS^{eVc0mo8q@sIr~!MTGe=xmWE!h z`xN0ehxmdv`rg$O}?dC$XmAuuMm<8d(xJO7jSoN9~Tg zDbrCXxJrGHwNY|dhiJPA`X-RMZ~9}7y8ZC3-_w2J+7w4Va(c7TGPCg(*e!nfId=b* zD80?s4u_o-@1x9&lw|=0gQ;MK=e>J+>n&|{QMDCMNomti^F|j2B;WV>A%fn!4>OBq z3q*lBAi67cNpW5l=$nuKQ~&&^0ujK}xG@>h8YRWoiEfE`>-BkHZLYDS1Np{^1PpWm z-=I0aAs;!fRty89#X&7e!iP%3o&#r>oAL0evYZ3a70YqXr-5ntj0;!OLezWilzjYv z?hb*{!(4LZ%+^4=(g%Isw4m?FAcbm{k?wg`7Tr+WLwL)D{beCcmsufYc}U8R`L74f zFc$;&dy01Tp$BiHY{Y{aPNisaW0JjDqb$TU$WN>V>06bSP;{J*oo%X*=LAWf#DSQV z-JX4q%7Afn7xHZ;(ya@HpwKVUv@(*Tz9c<}o!!W;y$B2K@-vVS7WKINHJVSN9Vypo z_Yb8pugcD)H8=h@90iB<5@Q-5s73|v7u9(5x7H~R{niC)>VW#c_>E*P848)=yb=j<;B*BISAoB&m>4jCfTqaEE zu9mi&q?+7jOqhx2rN~WmbfKoHtJ8bKGxfKy9F|+TiFb7zShiU z_s;0M5dMLM%MB!IrlMj|IcO-gjWa$6^N1+d?ah_Q*r?l8T4@0fVgo?yzRoPD(R)r`sca}v~^!t`ajNYq+ zq_tA70BlSTC@>5G8?3zNeA;vr6%qMyUG#` z&`S=jJm|jtaXqt>0e7uu9Y}Ey8PPRjL!9_)tGOL+hIq73C--$<$`Ncmai>8dJZs-p z@}RAJxH}xPoEcFLq-FlmzIk4Hemy(`bn1yEIl}k}vp!(o8N);Q@IpOx5RXsYH>E=T zmMT)cG3cI2?(8E9By*B+%~bk*DY@j>j@ngv+k@i2>vZk;tCF`>MO$TRr-k+I@#YSY z3q$8Z4_dH}U!Fgyl)ITIB3J9tc$NU=|Im>rL0S_IQoNL-op~73zMgq6a!8zoH7#)+D-P)DL)Y@nd znW4ucASwzcH!;Ocb2`ey4lxl^T?6#g)+W7R%`E>>K@Wq4ht@4_C%&xhUu_#XfYF7p zQHm~FYY7nGG<~umB%jg$FImSCf5vQ7UE`5FPb|6mP6vS2_A1K>CM>voTH3V zHwfaq9UJTfPLt9#MA~B0i(^nqXLly%=Hu@rn{u#VTsrt`qhMocqZ~07iyr00<2s{f z-Kolr%*Go+hx6Hovu;M?atuN-wBZX-67sa{sD^mydHcH(9w~`jI1Ewje63zlj8u z+obqwA`igm!mKmHhR^%~yV$O0%xg|v*dJ0ZQq4>vqF>vZHTm#4GOe?^hI8t;2U%bH zUv;Rz-ADs=c-Ls{0`|lxmV!)VSi&j{93A*X@s0Vdy6nbCM6F z$6WO*Q7cbF>PB^@HWc8$9LDdGVoIlvzm2#=W&$-MCSiI?5HX_vT7d+ZUDwD(k*XMm z1G=N`cq4}YRyx_up+!m(4d(tuD_S%tY+n+B|Fx4sR`&*~4zeB$Es6mve5B 
z4JQkGBG@lyK#@AVc#mWAXveADw@5u6?Br5ZF&&BK4^heqE&nkU1$__<1`R

)~%ef@+6sD}F*qGrDlX?zk;VBSJEsC;`~^2zoDhTV74~YZ2=<)Av#F znx~u}Xo_pWQ9Y=d_SRrph?3g#p%GRBx^#+Gi<8(Kw<1ppt25LsI2CE_V001?;_0W3l&jJJ1v8L6GasT5 zq+UwuwT><>x(7leYAf2s!3gI?0`%&?n$IQhymTSRP zJ}9zA`VM<2%}p}$VI1xBPO^EE$4mlu0euVE8`@N%&x%zRfp^_=Ba0qD^bX`LJ!xcb za<&=w@4MzXe4$ALqo4!wW+}=2-!cc6VbPo5u1nLZgWG1pG;XS>JM!A4_e{6t1J+HU zfS6eISZC)PljghQT5~Owt!3LAV=g#z1}k3UW5&kgZN%Pa zt!3B+L(Kfz_5wH^qb*l^2r%pYzK13`ZdO12>4cqY4niJD<`LGXebYd551f(*CJxoW z8VaimAlQ0Gewt94Q`L0Y9si9e={BkqCkcxlaW7K~lJ?UjTr+8gDipkw?T(^FqKSkn| zzHy)*z}6PN_{?=qFv^WKKrB))o0#?BMOlMsP0-gy`ov>}CGCy)_emOuo?TXPRTA-s z?0MmW%K~Qy0f4-S4hd{@hB|`QIas0>xG~z?%6kvM%HU19yN>}leLgA-@E5izwx_Uf zB-UcXuU{tVzs8A_?MDyq(z_Wl4)lwfZpW4vhI>hTqZ+aN9xQxGvn^MSTGC~U$Li&; zimh^VMaJ5X-Xgx^%)UPMA|su&X#FP3wHbCZHPHScpJsIk%8FInR-us-nfDw)AS^y> zgU2^btg?if3Xye2>24ZeG!)dY!<=o>HpcEzaJD|AR95k$o`_GBl7B^M+}P$e>c*rC^s%0UkN#e3Ho|u}CJrdD)uG!1WU4Dn2CN>o&BkjDyIABeHsbTa6 zks2fhLM&wr4C6iYTpcloCde0c>+%{2m7pKHU;MgC2|*rqn=GxfNA$UTG{Wxq+wd-n z5z^Q^lI1~)NQ?x|ic*uJ1zXl?JMsbyEB@AM+`$V5=lb5~+4wCrf0k*doiy@}jvq99 zOjnfLm+kcM8cQ{S3{Sq9Ra6{`z)Yd%pZ3yd`tZJoUE%h9tf}M%mN4llH`7Y&r>R2xxb<4taZp+bUSc~e zU7v>tiS^FXIG=T8j$MfA7%Tjx>a&;)R84N0S5ebkae2Gj4+Or|oG}r84~r*VUTSZ! 
zboR(6>!T9;HtB68>4e+&w<~W%>W<6OtmI-L96!mYQii~Crc{HNWi2`-jN=ufv?}W^ z^prIkR*K-{8S&UB#!|!0E^PV6LAJ{0VD@|f)6*`JAtdG@99sNH=zayZ#pcli z&S-iLKK~drMB{t?p*vv^AvsM}&Poi`<-e|>kMf#ta)rSxP*`E(iZ}@wARecYk|)>z z+-g#dvU1;ZQw~6Y%kr;`W` z`&!+D8$u`984t<6#53g6H+73A)m)oPR7<>n!8ODaqPHX;eT*v9Dqe9`C}dCmiRHqV zr{Jy3x*bc5>OAc@X60%o=v*db!cs1nH+oAb&eddYCK`U4QXwEAZ4#XUNmOQZ(?958 zkC?vT3%s>HS}k=N^NUKu)WkEXdu@)rBzNt0?6$M_vC01uyOAK)tMF}K71stwM}uU| zW8g}dK$t#j-QG$4c+*y9@h&^xu2;t=rV%;s$g3a2#3`mMjEBP@dVW$$!DZ>}mV zdC3Z*o6MiVR7WP*U$M!a#^}rLy0$L_1aj6`pQZ8CF`gdLHPb>a>-97E_VjS4S2h7w z8&LovRGW^*&-v#rZ*F{{YTTvkmoAg#dE|(=OUQfa-;tUZpMxxsCN3o{uwEP&0=DDc zH!?7Uc*vCBiiI1+YnhJjZ=dI}HTQ>-`geE9GjzN#mxC({wT@=3FKMCcN3jp--1!kG?)nP~ zTeBhR!{9J|tI)#FJ#O_E^UG&dB)J=leHiQ{j8pg2UNgLVPSA0;AOzHHYi&VpSCduO z5#)SKwwYhol(?qKTfj4eicf4<3?58;fgXl&4sMMNZ@RVv+1PC%@ttIGg{^UgP>9}h zA>&EjD#e%5N>^bc0|KjgTZN-WMRhic&bZ!bL2;Y$oo>h5qE({5Xv;H%ZQPGzxBj7S z0TOdPI9&F(=Lj6AN@LI8WZ?QA{G9tVm$w=7NN7t7MXdYOm+++6S;|3nae2YsrF`RR z#qM+o*Bt1JBxLQm*AR+>nkt0Bx%JoC$DF&u@8+3=G6F^o`G{$`a&Oaq(Ok15cYaq` z1q7hsYSA38$um@?4pO+ox&#)t1q(|XfiKr5-l+;UEF7;UGv#imlLZC!_Fm3>?7d$6 zTxa-ji&#p|_@w>SQpB59NZ6lr`e!&jW>f$va1WjPM+%I*(HN6bmyO?+5&fb*fA%93 zaq4gm89p#C$9n6!D9KybhkKl#^-3D?CaXcgV`{}wbBsI*1}qI!3wYj>H;nM`DNos$ zJM$Vtc1B^*qj0{by_yO2H-Q7Qk?qYJ9C4m%n5(=s_3 zu_!=Fnlj7Cy6t`&roRMpF?g(D)sBngp78fxX`Zr{`(gD~P<)1Tpf|d)GKHy<2shwa zIln^MCW$9rmWoeYI}YlO+>Bhbj_H&6ScQayJC4DJ16bDXbsdtBAWI;ZMGTNHh8K*JIJ7>dm#^v8qJEK^1@xxmRgrlTE0G zO{)A;5DX!W8=gq6USghAmxhbi~NCIRJ1+3yIMg zv5(E=bMiy-?cS6*%8X_#BPxDa19~42c(n<7PG{;#ur*@ZLQ1Z6aMmslgnK@mm4E=c zpf?Y?8Z9XP=uR7(@1Mw+x{$OjK#(tMqGR|szcGS|UIjfcWkO-)GTbaNyJij%EH2I| zoM%13UyWgqjVs0sD_|!&7l*eW1i=U*#_3+t$9>*bw`|9C%A8EOw6SCyE~(`Et^d#q zISH5(%RSOATv{u>zxV~Shol@Z8#slQ3OIliXzE{@tqIeK}*;5 zM(eD1+r)uLy$8eAU2yi5FXtyF(lG+QH>EY_#M(K7I|>9x;rOaGpY_9AXSLfK?!6UV zsU95<=%FYp+r?!P+azfA*oyN#%x|$}8aryAcAPkQp0zm<5<{DMz}p_9mO8WB2*WH- z7umsL3}SFdatqG{tAc{)^O-^D^?#h!+m)H}8!BqT9N^S(S0E%-+MELrSOm+%z6E_S zG;Ikf)&$0OuejhwG~BDw`?7Ff(>wZ$g9)(1t0o*=WfjKQjN~RsE!}*C1o|1gx0K3- 
zc|T(#Y3^kH>hVNS6{iJbeocSOGMw(%|E47vw9lJ^%kxoKQb<87RtIx=-2bcjm^)|@GqG^$d8v%b;$O__ONGIWse<5rJiHP-g^6zL za0vVJN%O}UFlxRCO*8YF4R)VqlS-NuN!Te1Ho#TYL}3Dsf9`vtjHGYA(pS8AT=f`x zePCN$MYMx_mg%t4=T4jU#^Bl2P)KcT+k{r)tjef9Jlz9#Xv*Asa8m4XVJe1FSA(A2 zO*RfMMfQCt0Mw)uRCzvhlw&m^Fa?0xO1PbdS*gyUGuO9H^}B;P1Uc=(Kvm7ZzF?9n zw|nPxHUtd$rR)Z}6YFTE1FG;i1{n3PH6ea%gWHyvGI5&DrX2UCY9vve>zAUp-Qi%-=q+lE2+UY zxl_o#X9V4m4cG5y?)yR}8W=TYg2Q_E2ZTNeXh{nCU@}W?R&4 zEHl{L-6#T{%|--ng}6cYgo)9I_eB;BklVkVLx3RwZ>Xc)hJDjsG#ezDzIedborAmc z6N*dBYyQ?Pvi<7(=5wN^{q>Gm!9&>hHjB2k?+!JzOS^-QF#G51j!jhXUpheic#Iy; zC}a{O3ZqYcJ(%}A(WEK8P#k`mjs+FfMFL)Ht9*z8OnUL#2?X09V6je|v9G*HrLoSI zwubyEnRTh=d8zj-6KWOcfJ26XTH>5UMkB!Ra;06uif)APTEu{KO*-jy9zXgC=v~yQ ziGBkmNVkw05}J73NqoAM5LU`kwkpKx_i$;(?jfc(13RaTwvt=pc<=zR0?lY8#V?ik ze6?!8nvK;`)@CoL!=rXLm+O>{x|dxmeYK@uFa|X0)}=S+f~amlnpSbUDoZ){d^dF|S`p3^JZZo!=M(@w4JUW3AA_q*Wl)mhiB6=Y)ACpi2*FVuua zqKR*vFX`>Qr)Dxf`zDQNRAD%>em9>VSxl8-Y$@rrX@qvH<>uz-{WYY3g zJ94u~Vh@A&^=IjZo@*IoCQqHp)wX+me-e(6p|OB{JhIs)NEnI>HrqM=U24r(@HZ-g5vnYtSx( z4BVS2rS3e=rq7RHA~M;JylX#P3bDGQj0)+J93*Ih$(IP&9R*=Zv1NVGbKqU*RtY-J z&evfbda+hPoulLTPFjvI+sD${??~L|gI9JBfqF(6v15QBB?a(~=iHLNp&9)Eg8b1b z3lcm>E+vRX5%XKr7gTgz7*v{LV>JX%0U!7)-pAud0DP=;Fi@b%k^*0j}MuFEtiy93h*J{;CwutTOIMPxav8TL?Z7W>Js@{we;a( z3IDH~H8CKM8FyU&jyU96k~jF^AoC4J@IPqYu={w{Js?`g%jW0KHo7In0IxbzfMlA+ z_zt4}ZQu)v0x~~%GtNP?Sv#<5A%bp-x#qD1pzoL0u@Lv>_4dxtqTEpOuA>;=BczjA2Ehlu6m7Rp9PItx;sw;T ztL+twQSe`M{Tv{4&(yLIPuca+6W&;1%@4HXW)s5@_m36Uq0iB z;I-=5>Q}g^YWTu7;BGkx!R2-Hy|tYP?0f|NyP((`PV;p?{@?b#pjePx-dy+3d&{qC zC?2Ek7B8q+Y7@wB%=g3uwmf=f;TTE!xAnR?Xu}!HhU7f}3un8ZhJA0!=vRt3&-2;* z@oc8H!23VSH4s(3JRcvG9d?78<~3&bsE;KF8);xk06P8`8?>tJmJ@J9!hfwLzTQ2b z$Y$!2ZSewQA>~KM4GnHeXOK z$dEI}Oh0&?;S}T+0sV=@_rG*S_&f=)Znjo*S7ZOv)XoLaA>6f>_QCY4w6rRhvYi6T z%+a{>cP*F#ty;OADwYQb@}qs{GsL!c%O879Ppv?On$=L*wamO5q;JsTZ^U5(G<*q| zLhHZ2q48xOPUKs*w&AH;@a8FJ#s7}M*}Qe^3yKFae*@M0lm6e1fgl};WFODB$E)d` zk>u>3Az`)CuNY?vnC7M5x>h0VL_D1_j&T3@8qh%|=nt1n=L(P>hAdc&`?9DuAx>471`Kce^x1B%q4 
zW$sdh^<3LwhN|Q92y@ww^#(5c%>i(B6{GlR%}6P!j+40*nc>Xm-cNQ@|2_!|qr0OsxjZyNNUBQ_=RZELiY zNgDlq4SNi}?DK$ENA&KY2M{_lk=WrZ3EzLc*_$6eXoHCIU+ZPwUYP8f1UIa%82&d+ zS?_zZ+w*M(=6?`qLiMZTfZ9yzj%sSE8xOIqgfw7zqnIue;PbXp7zzFrfNvr~&3ktUq4!uwK3A?r?9_<);d-k{IE zLBGCYVE-37GVpQRj%zFQvsr+FqL+k^TLj>1J`fjDUvBbKY5)5V007BJO7Z!qj%Wm} zE-+);fEB?^SlsjbOSxC|8NADIT*^hyA5YHz@OL7hRX zzioAvB)*4oBN|v^PNl;B@)}xQT;w z;QxlkJ79R=;~8R)n-j+#EADmYSY}ggw4k?tNbr5RNk|Z+(EkC=2cjc^=VAI8QxM!8 z@{cXu8GeairlZ&!u?_$i;BtDO^G_H82%Q@EcxA^n>d7iZBkRNkADN&y?H=R1L>_>ORfLN0_Q1xG^(Nzb5FJJ@Fqq)5da6U02Ar|ddm?hK8`AkhY2`)?&wCiWorgK$39z z|NPSngzHQC#aXDT*8t2||5E>}MvocsJK=u;Z7Wd4-lO&V3p7zYbe`gJ7%_TfhQGWk zJj_bpYtr7;h)>3sRFj)h{XeQk1hbtr>u>te1WATP9BO`*MTDo=S`xf>Qw8uRbA6F{ z|EQoZ=os<(tHaQe|Nhpel{MZa<5$j?h>PaHo_zolSl=A#C`tUmze>;u->I$tB=b`n zr@K)S?efj>_#uE+1$RRTMezS?dwd`mAcIE{ZY5boVhQhlgbzdz+=l`7!=e^Z((fKFP&m0Z0m z{+o>GzIR?nH`#%b^u4<~+Bg29O0g(>un^C$1wqur`{@uqjW^G;G0cz$@J-=c$h;!Y z2Ks;Oy<>D`P1iOWcGMjk9jjy8w$n*Ew%xI9+wK^hbZpzUZ9Cu2jeg#D^mE1;HU#~el>jOf|`VV@= ze^nO-Aiz8RSfQfd5nMq!HjMnoY&$aLR>w%r{_+OzU~~lX;BP$#e69~xGohL=>C%m03uMkcE3a%=p?*f0dNzUdOc?b%ztzx%8SK2^s|jG3&i8! 
zs(b&i0hOrqZxM;CZ9oz^Hv>Wiez?8YD=8N6xZNm>((f+052by^VDzF}B(r0M8`e=7nY0WT=O2hV(4zu3)xdV?20Y(SJL zzZFSRTKksJGARvr=UtHPGw+RdgOT?i(1+sP5LvI2AD8%drhj0YG+{iTH;XatDP-x~ z&i406=Tz@*B_Y``>9;)&1B2-ymuLOAD~;XdO>mI->Pzc0#@*3<$I4b zxTYC1ocY&?MTWIVv?2Dtis0=HR{1q;6?d!GJDCCbJry?qd3$;@Rg}#5E8q8S%nwHL zh@RLc@WB6F;9Ud0k^)q2#whCXy_x@dU4m8CrFj5SY38i`Tafm6UMDX&VB^I9c4mMk zPTlKN{#_U>fTgi_aSOw;kapw|6SkzP z4N-n83@^wERQL4OTeHT)b|k4|roe~ax&02@6T`E6L=CM`_k8(}@BTM^bn&b){6wB~ zPJ2X*-xl__Ox6>Um(h`bWfHT;O-6e+44I2rf-dx6H93sOS0r;~oC#_`D!j@PO5wLz(;B zu(h0vqYPga|3{=X(RChP7Iq0}15157ds_n?i}#R~t|>Gf3mz@rdw`P@k474g4v&V< z+}^;}Qper^k4DhI!9>qM#8$@{k4DnK(#YNzkAZ=e36DnD|7>s(jd6Ml&tXSJb1XuHv4V>eadQx_b8Hp>=v@{kF%prTTWY49dUw%}L&b z8XP~4lswL#)OxZYKfika4gnN{Kx#XL(fJvKoRpdxvj9)5RZz6G?^SbE5S_f?l>d~; z;n?$74!jeDdzc-(Oi;FcOaL-948F^cCLV<#kN(s4>=krA5jALOVr(4zM<r+gt=eCNHsjFS`5@)Y^ZXSMfkZ%1`|V=bywZVL3mdQ}SV;y{`i(Qy$=wT~yv^jJvS&Pta zvxOyKw)(lUWvF7dd!rhCfHKig$)5eW!{x}tLx37M*7NvP5gSmCvy}`(y6U~UfX+|` z2t?WLC9tA)zc_uM%Wk^V&)&cbOd1b0xCEnzT_MPlXm43ek)jF{ub2=RIu z4fdT774SPA#XMfspuo$ls&)_~;|*E~5UCFg(6X@o)j)S{*rZR}(6-?)F`X*hgMJMS zm9~PT{2=pu4Kd5axR?usj;Bh;!??5i(3rj>ZNj$bXxSdi?p z1d(3BzGVXnA|4gW&#j7&HSsuLH(A#jskY}+mfM1hQx#2?8Q_G{IohzEtx{^hiE<%#b!hGVTa~>1l<*Nk3$gMlQ~niiu1s* zk0r6j88#X%Eyr#8Hww%m=2)pS^ApW-v|{Hht6tQHOou3k0*9ZveeP(crA=wFT&JJT zmmiGBeqcwXA^NHhKoKw9qn9U%{fq*Q9#9I~4*NfQu=6dqnX4{Q2B$; z(G5t{;m#PogXs$p3opkS$1lfd$AKYaWS4!dJ+I*_1FOJG|z^O`Cn`>Y;C5dBk#!eEVIBi(_dNC8A_S9emvc|C(KkSar2(uAY{dbsu? 
zgFw<2cds6iUIQLJh9LbJ5?cIsnop(JI+zEUkUwb@6xmiQ+ujZc&B7A(!I2}mjgmg3 z>F1Lee8QMuL;NZf(5E=AKTafk)eei%9bXEISjbgMXV)E`qUb<%@vyFJ09xP^8&@NzdmFjB#yz?S}2Ngs_oK`o=j+VU)TIW*Ps{gkcTN3IC@;1RQ>m2|9bs7cc4@ibA9nUm96B zmyi$R5{!O2=r~jD_H=r~Ak{r-w2s4AFzx(R#+1MkUG$Zdl0E}f^v4XPnBROYdL6Vs z&DW{GIAAt;Y4n_Guc+YOLfj_!C`^;PATxyE7=ZMoc4iC8#gH+OtHw3N$%}&zJ!-+B@!f2D;4PVoLZvDFxp`y3LGyAIl z-If(3X0}8nBwD`|ShVDMa6>o0E?ZUdyZ|GDQSgx<3mtFs4^vsGB56gfFNy5oDMINY zWqGr*M@1rqds(J;vRaeIjG*iOXy?S?2~w#t-=vqsIe0mQn#5fNvGS%4dhJOYdAJfr zbI~R#4pR4@4oDB?_PMU-J~9P>ccmlriW-Tc?}TCYYW70)GWF{9MujAVl!a7?mxl1> z3mW7PQpYRD4}IbKB2c_od|UilELOZwETPC!%v_wI0GNg1my#M09}%I_lq#)NvB+KI z`*mfyVrn)0eHv%RcgCo2Xy$9-nKDnlhscY6KtMpcaFlSPaCR?r_=j)?OgJV@hVRMc zlGFfNOV%yboWnI^@?i>9`WMkWcSv&Nrin@ble@^cJ+pp^DZsTC@f%;bERFq8fE zbxDdtnM204;a2kmX0e6c9)}}c7ROuKe^ff1fj!dRM#F4HTch>VsIkd@?BI5P;`;ce z-etjM#$|0^V-jn8ZXz=;Ge@gbtHe{iZ3u{^l3^Wl8?%&=Ro{AQy(~K!zfabDsYO|^thA_eH!!kFG_9LHE!3!JiRPMXo%YClafkR6$Q#%I`yH00 zcdFMlatx#G`!kA2NN!+mf7R^8Po$oKDY4_4nS)}6nT0;pP4zvuA)yujE&1)AWBE%n z?M#cmmTwA=s-c*XCP_;(#<@4RmVDD;*6Lg>*)5|jcPz^+J?l9duU5=ftR$zC(3x!} zx_5)eNZF(ir5BkGY6Hvr6eIILTYUC(c5xo$#Nv8tHgBeHu60_uoV|*;(mZuv4LoG% zF^V3e%VcTIX!UOmbo0EkyBNAyI-28H@c3zcSw0Z?T=wkz%JJL;q5_NotOafluJ+*) zlc1JScqOK_MzQHlb#{J9b$0pwXJqFa%@5MIyw)77>qZi*#zJE9~*~ zOty!MMR$7UdPH<0dUBL34i?A0tWWg~Hfw!Tn_HV^Tb@$Z5@Zs_GSgCN>GdoZ29t4l z@nUx|I59f|7UU`94Dy)rn)v~FZj3aO4CpBwXD=2~s-3IhjAR+)8Ey@mw|XuQoqX-;?P`ZH)*-G~&NHEX zz>dJ&`8_u!FSXG);pE)@QTwQNV(&p1v*l=wX?BKTP zF6l4Th}dJW2Nv9}o;ROMqrHAcF7wO1Ffnm|QY-5?iXloU#b?0rt`Du^hWVwJv+`~EOKo_q@SZj% z3*82(2`v?fiZ3c7<>f^lb!lzSeKaer%lA8hD9IGmW!LHr7AO|+`ZFUl-X(XX6PgN* zyAz8=W~sAPvr1)8b}z>_Xr?4H=4)Ave687Lde^X`Z zoPQ}*-YnK;S%fc*)PA+{e2Tv>S>ot)WSm);nK{lopFm|u_c?d0}G-{5b%k{zU<2CiQBA~nui z#xHa4B1LFiG|dms_gODv`(#9@oi$AxkH0rOgp9?{mc&+M)T26`T;D_;@hq>e*3V;1 zIo}vwjx5y~E*~~h)vvgc-i=;02{{IyQU!#%&w!ReFu_UT&c2vEx53&wJK&tb9-Q8@ zUF^9=-bZG<%AXH4WrS!kd6YSuU({#MwcM#_sjS75PGw$m#&hEwRy<2R)lrRMXSmvT4PM8S(+AMe zqh>?ploD)pSD4@a*rjM1NJ=Ex&c;+7#b{>C2y2pEO(EXM4Fk(oPaMFxn0Q3l`HY 
zK$uH1+1c+me7vYuc+jPzi0hKt2y&7fyRP{dL5t>9VU-{ROsr~=Y#e=3hJA9PI=gH9 z(6+Gl*+b!ltKa2G>4M~VEQ2pBLv_vkxRj;^73h{oEfuG_HC_p*#^M9-&PJ)s86MCj zI8e=M+STU(bIq9U&5xuop~uJOcEFit)D-(HPnkYegKYt9cgaJiPs$txXguD8Vkis- zLOTUN3M2%Fhd;t5j_JSL!M|;_vzd$xbWx^dwxeD6?<>je$3gi88#noLdaFcH>qGl_ z&TM$1Gm}DKc%+*l8-LdVa`$JGtrvsnx8ed4AOt0{XTtFlirV>ksN+U)w)f!+i7-;e`(9 zke~%#1x)PuoWrB!@D)d)=D{WN&%$G=K{^ZY$+9AY_6P(=!N>4EjFS|ifAKBO7Let- zCbY+Fgw*hT%z_xlWBl*}?Cyt<1Tv-bX&$K7YoU?@8&afmXkE|=B@F_teQ86y9+exm zt`lto>l}#EU#l|~lD8icbV67K66->cKT3rN7y;ibnjjCPNGK%=HV>;ITr}Ed8}tKU za5o7ZwHIS6P&I5rS6OFPSDbbxNiRuR5^a~+5T3#N!6#pLrm8@Nw3NpIiWwm_oT^({ zJ5u{wWnvZRyv{KPHf&!z#wND4M?Jbay(LpQSRLG4Sd-UL*QX7V3#aBUtH@_Qa2HxB^MS)pz#}pzQiR8Z+=G1aL+E6b^DdOJCkTLd?pD_& zuZo!GYZk*Mk&Zo9bnfNu|-$q)s!hG%EZ$mhKu2m zjUyvLmJ!Dm;$M(v6K~*k=5-dxm;Y1u)R?X&(<10D?oJ~T%aC}LxHV7~TM}&+ zf0XD-EE6}E=$NQQok^;N?t#wBUQt`Xs}3&Bf^sRG!=W$bB(*-at&ob zZ18P|YzKP>bEkSx{zE6N_C131 zhVj7hk;RfqRx`*m>BSbsGSjrCB&Is13#Lyq)Ft6#KgJw0#?rMI-?pCv4=WpkUFsiA zZ@Lb-4r4J>Fk3M(FqJS98GD&9QY=#jQg%}I82f6B)nI;ssqLuk{nDh3Pd*)1G$gJL zWy>xrKlwp5!JW5K!d0eMSgm4|D_uQ`q!B~?jfJmHy-vZAUq*sPg+{AF?kt;IF0(8% zuT7-wo9AHrINqXmC|_zvVRlG%n@)yK?uH&=adgSVmt3`6j$T&1TAPpy?yYYmmBHy^ zF=F8ZQVR3atBTrM$R&327R7)zL%`f@6lRaHxcXS3M4IQ!jlol_quV7Nk6Leo)K zP_-yeC>5xVC~;NERl!v(Y7Q$OyB51s%rx}e`vOe2^r}Y-R(5}t6;Z7@w4Jzfq-a#G zpf*W2jorfD5T4^bMOYP`!SZXeO&!O9*du5($;idkG z0!{)hS|qeD?nmq%<`|4-P-4Z`=&u!ZF?YIWre{C6*|~$c!?}@K9a>A=Y3_AiKwcak zSFYyH_a5h-WWZR#N}=|l^T33`lp(lb=%En7zk_vk0CzO`;L=QL5vfn3g77~JIQVly zF@4lPd9*{PrerK7Id-mBa`JF;1f$Xw5)|le5{(3$93v7Mjw*rbC_Et2BQzr-CFClU zC?t`nLaAE4EgYs6hF8Faii?HfO5|W$bT_#o5x8}{3B7en&7hu8eY|M!HQ>2}>C_Ci%zw6AeW^j(QKO@Ask2#9@$%v z``3q+;ak6xmi*iNC&zUMNXO-sgT^bBWL;Iwh4J!rjk2BL%bd%)rmx4X$~xa{wBbx5 z^&@NO3xBqVSD1>-{5Z@z^u~-#*=;#=tvg9y>|2^^TeI+-0uBf5f#Cj(jNR_U!U(aBUv5iL0dR z+JVgmZxW>#1f-&HVqE58KRljNx4OP*E^{5Yuf9w`6v1io47;$t7{BNpZ{?6e z%Y4Zw<|%ubxt}vEYPmknn$sSu4l;@is28Ali+`w)=>i`s8lw=d5k3?t6B!G82wQQf 
zy6!n23QL^Y@^2%5t-Xt%D9QA2d}=Y$9q2wcn>b455nnBAWqX{xwb;|&-=1bBEdOX`bphBA2J2vnEKn$nsS-5osbd7Ai6ds|cWi&BNE zz9pW@oWf?hSBO?K_Up~HJl0YVS@>sQrDg0S}hM#(DOt^?9v6n(1gLT3_;UK z=1Azcef>HysY92dbWqg3vgJ?vaJ9ENE3M)7lv|jl^T!pPIKd zS2#G~2G-Y>i;&sWi z8kjiE*8*YI@>u=8{f2wNaTu@{!`)6{=+t-BMX`t-!p66imWR-%kx#6jP*IDX)-~JA zK<-u`nn_Os9c8T9hg!&Br^ZJR*M_^mWvUv&h}3_4j=XC9J0+MJYV2)MJ2+64~%Nf|jf~8L;2F zDh4me{3D(YjLRanz@v5?aVw5*Wgv!Z^zsspI$W0w^jq}dKVb*Zd>Fmcn%&mf7;aT> zh!Te&9CD>?1k3`PA`g$q?WCK={Ys;~G_KS0E5$o^PVy;`E|w+tz`I5$Q`;|zn5Ot5 zc_l2oQFSR^;f{}b0He6SXR35MK$+|Y;eI&Var`4*NC1vX4wCm-dOB)F$i_R!Kw`x~ z?j$1Z(_oN@+&)+-M>&P_Zef+_WvZz&B-!fRlK;f1fysf3ilcG%w~N9Lao)R)oYMvhrxCjHM5>VP{_wVIEC}Byz;3qXKD*EnG*vN$ zZKgOUL5v<}FP44ul_ZanQX!E{wt#Xe;fOH04j7aGkyEC7B1W|22TlcnC)} zO3f<>$rD-_+a>^RMXjl63v<%VU;QUGpAt{Mcj|d=clhKo1-xuzkFH`s9G?I@x`cXa ztQn>w`A7i9lS}JzMdOKY7cV%i(%nZB#HWs2V%a?^FT9-Gh7yTtPf3{BJJtY$RHOhe zp&^3>MgnxmeL+K*z@tl*uU{jaNCmlSq>jb~lf^QXP-PPu1w(nDi02gbAxkp)j0KIM_b)(JLH|Qe_RL z!Xj|?BresnF1N6)9gAX}qp&LaSAi;f-6tAgB{{ zFK{s$Iv;4Ndx4Z85g_?rzwXUyvx|4t&5ub;U_ofk`!EfCI?1j#5J(rMPc-rALDQJD z9DsNhaO!wZFRJN?^<8L*&5Z3%lMwV;5VrLwSkT(fs^JJVp839=nJ`=h4V*jD5)KEK zx8y^9Ld4Z37ZW?LAcs=>*dKRg%Az$qXSiD(U4uwrL#QkyLx&>atD({<7W2LEBeivC zT~d`E`{cp626Oot9-V#a4ieN&!(@*tc{ex+qM+_xH|>*0RT&yZ<(Oe&%y_I1jPhCM zB#`b91R3DRO*}#&;XYz*_5^amRyvkASe_TNKSBw?1P*{$bB5r)TcbYeT)!=`4WhDr zK{%HPbbl}2=s9iaJrC0g5xf&u^w#QICHa@ti`VG zBEG^e8%;3?k}?SrQim^m==XKu%z785B0NI-BAA4`Nsg=rdn)E7@!K)`%%;vcw?r zTi${BOOiNc^@7|TspNnHkQE;gN2f=(0to|9>En_{e{1r)TzoISx{nvNLB*?zoIeF4 z-B=9J2f`21XayO7$-CMY^z>4Oe%4i)Rf%<@M<9#VYmrBdt1&@){aPUNYN(`&+IFfU zLA%T!dA@`{G@nGThE*v^rr+}v_{rMo0HP|7-$9B*rvOsR?^ET(xMM8k$=I_+@&Tkx z{qt=0{c6KUAL2!L?{7(xgmX6d!XV7Zy7f%5<}h?_rzpBLRZ{{7Lx{l`0SREN^UxT% z;qB-8)y$l*2Ba4K%*T`#ltJ2RA15L%7skV$3)d{EjhD{ZzZ z)>UFE2%H9pV*3WKiXVE7EhH&6%DVKRFTguL%^y&G54mqpw?eyV`OtU&sy#F)A(hn; z+xYCpKb%L1(@YO#CNTyENuN=00kY(2ni~!G32YsfIpk)0UF4)YfM!fEh4d>7C{0&z zw{-)oozOF)TFkG?-Py-><{aEj^@L56it8?&h%+rd1rrWR)3T23LVg>NN?AD=IOQ@r 
zkyLqOm&>EW80DeP!gC*eHP+X8gKfn?GGR^-om8OIv+k1KUX)*((QdE9ik}~qip#Vm zlnIN>=%+~8cm?V~sW(DR;qew7w&l{@G*1#qQ|Sh_G54*%Puxv8!B5L6D%uQAV^jb<(uix`g-scRI(g3AUJU|O&9)0 z*GM#BW%bkq2&IGi*0cf~LpTRMOFQgHwV+A=BIqUiROBn@(~rjT!SEzE;fiUVe#R~c z*}>+K&W-&vUNB0t;-*8Cde%V@ZUri1dqO<=q|wO^F0x+H-GLYO>M5N3(J!Mt!>2hE zJjrPcA2bmRAZz4cC4P!EKgxbjrfX3gNlM?-OCWb>Ln1=nq7^MvJ*ed@zoUqS6!_JK z(l?VCRwMwg<8VmUiJO}nv0vCc6|)OpuJO6YFAFxrJ*kV#Y{m~H5yaY* zg$j45{G;;E9E;;=;Dz;oMU(yyBShOJezIJx3^oFAO(IkV)>^wx-6jocM~USO;RuJO zVbBz?F;p#XUJeDe+KgF)@XFlOwHBgflcyvnb{JZ&9Z`z=W?7U5hf%s^oVZE}D!83J zvvh+6g2P`8<}CJB(v=8i+f1Y))QOx5)@o+2yNkFi9i4Vm~ar!ckJ%R%v+O7l@J*u{Wv6J&TMDBlG=H)1=|~?z<6}P zqyYWLXvJNWL6SQDM@xD#T^cWxR4#M_0qYX{4XZ}mN&gV{2rc~BbL_9YaXJoF{gkLr zuP}*rvSeu)cvE+zm75g<5aorI{@(KlW5Atsjc&Rv$jRG+$wsc}h#UJcUrYtbgCqAT z;Vk8jK}0)x(Wy2HVOGwS>;U`REO%mYNMWIhS;5rpufm05YeF>b3y&ieNk{2__Sv>k z%!9H5UdKW{y5Qz_lpnQRrZPW9atsl0!K6`afoOT2_tlI5oCb zP*L5&(#C{BdNV`&Jo3=eU(SM0O|=}|7Cn}eh;!rnS*e;Kfn5cR6h!rM2Ib>~#@!dW zk9T5U=s>p>YHb=dfNxlb9c-*;9!{&lvA~o>;q;dAv6Rvf1_Ej@kC8B8NH%PTJ2EJj zw+b{e6Kc^tb%Yk-9d|$DfBd)!x48^cjbKn+S-n&H;jDQdOk2JanqbYhgcdv5xM~nb zezH@>kDg1<-~tkpcYI>VsDzn0B=C@KKDQ4!f~ksX*SYOOA85#T*#Kl(Ww_!6}M9Bj9Cvbf4r-`bi&Z zco*`TWLoU}b7TxcXWh@BX;&8>TVCprd|f#=7f!~jnp#_%SkE7$41RUIL2VdqQQa+> zwQ2xXRZi;QNkU3`SZ8=vp;Ke_rPY65f%dSjLie$%)&pG`>3-?oQ zc4E#K_YH=>MZ4oqN>0c2Kj^livo&CmpRA72U!S%HmiBn`f3b5Kc>_DE@3wjdc6ju3 ze_s)>va|<809<${?DBXtQU>}aI{a2n(BIxKyw54Iu+eMa(a2lbzpovnedp@`tTFs6 zjepk|%Gz4#DHzz}0R{zR1%>fw6b+p00h3!6fKmYFsQ&sE_~%;;50IXKmARFzg0+sG z0p9x(MFBfHy!S~ld1ye89`Enb1a5A?gchKHf5lLArpJ4)C7_Io03t}w@Nbv}kB;%* zFf$(A@9T85(Dc7=0O~>iSNiWy1LDxXuaEpIDZs<@|H)28M%UCp@9!MNEC3JvEjd7? 
z|3|&wul-x^zlHtpV>Alib?yHW|6eu6qxov04_IAE#{`fSji`Z%k+D4<8w+5bP7EM+ zTWhQLb(VPcw%-i^)&7q_{w@xUf|QP(86N#VmF73Gx05xn6|l0fwz7P0Q`X;8e{`%& zbbx68EZ>Cwe_XzK+yb7aG1Joec2t&lXyLlFw6uu`2?geuxVf0)p8x{BxGAqzs$%0P z3JFidw|Oe9zd4{HOn-VB{p3mCAdA@l)==NLyzwy< zv^1IPx6X7_8PsOWR}WJSz={7$i?-KFW=RmI|1z6HMMI-euRr&N<4$66)b#N1aO7)n zxj9rSQ7%(&peH2MTWS2BT5_AhWEK(84 zzT;g^IG!IfT2nLy%XK}D=nNez)R!y-#O8Pivo1ZZ?`Q`0%iox#sb7ayPFR zso}7YUsPmbw~&yS_;&cUjeWxpi8(RL_o*)er%AUb#KAsZ>+NxF?P>LH)%C0e)^Pqp zAMkxQK$^KcIXQU&uOR+s&pmHI+LsrDDBJ2Mj8t!RW8;P=3MGtS-OTcZ9UL5dT>iG= z0se)8!t-$|Q`7CDr`}#(UVc$d4RBAIqYva|6yI8qxV z^7He98{|G+>hL%Vt1>Y&ODEIsEUmrXD#&mNy>53Me~BTU4d|uKLc}JnSTr5))TJKZa31HrR&HBZfw&&`5;U#I zP~34oD0@w7FQ;>DuQy0CoHp0{6Z@atpKg0p1xcPxo6b~?(wTI-A;B^!%IkG^hm&nA zVq;@z8n$CPJ3B|j*elvzA8}vL0~578k~a$vv8z(()=n$v80JS;(yk`3Zyj8fGglYrxSK7Ut%nFQ;R4~0a6_qu+|r?vr)kKMSF@Qrpd>OM|EdO_z2M^=dSx*Zg* z@^J))rkU1kwltFG72O|<4V*4epzV2wrl_k>E|<0Ynm8^8Um!qs6QCIOhebbf0;8~e z#4@?v&kx-m4}Lf<+dyE#=tACTH9KASfWs65{X|q1s=#0{#Dky60f?Dp)oH7)^@i%u zE6PXMPmxKx9jGR54bbrW3^m{#hiz|9ZD>?#aVaSw>%RS68E9H=(YNwozlc5K@)I>( zf^Fq&Gu&?%(j8Zm2d$Cw_epskpx6{XsnDIk+?XuTq*a6tE_&)@3IYsA~JDdhCzJY=OSv4p0I3PF7kL8bw=F_I=oKFhm|EF(A|&v%`{WHPT1W=cwD5g1 zE%#FxyC@ka>fx;)S~Bb#xQ!>8uv*hTv%w zSPKL(h$8*dJ%$b#&InWMoLeK;fQp4>TB)8?+$}+MZU<&Iu8G@~%cX&Y~p6NCS=o{&3n^ zSP1v#L^xjBd^$PswqVuC=jGmNrlAk-|ORy3Y z(ga-}cD2q4HSc_Ig2AfFq|U60E-0W<1|J5F>bD}yVo@>(mRV~cH-ftRIZ8`>qfibM zMZfiPNV12LlvW9kC$>Es5T^@vT1x!kz#7rK%VX`~8L3;Y~={yQ@<0;}E0>L-1zqKJx@& zPLc((6!#{%+})ZdPbd)s9E@u4gY2_JnG+=PQ`=SgSrpuc1rg3{q0S*H1!5oD7BYX1 zi6>RMpcn*Q$S|(^^#m&fa}p)$ga7<#eVzc8cU1jHgbhit&^#D%17~`{el^TR&g)ZPf*5E_eGjC(HFr6{!RT9Eft_e$l-C(6ZlrH7Y>MH>C}N zfE4OvtK6MA>G`y@meYG4EhdJH358jj+(p35sS#=r63KO|BFQ)Lx=?M@_eBItEYMl< z>9`v1XjSm@=WAkUA+*E#JYSSOfIZB+l%&W#X;!IA-up(^7G_~8x>Iq#f}&!UL!csI zWJisI7zL+>;ezb#2MSCgSsC)v2cIZeTPR&sI*@UlKW0f`2Aq+$cE!G<62&bqM^TP^iibL<>0PguoJ;%bNQgFf{D;Sf2wCPQ=f8 zx>A?hV<<4gufQS8@S&{Tp^Nl)&`1a&szmTQ>pD87Y9kEIPSU6nWLLTtxWwr}NpF9` zwMN2sI|Ps9NYYN3aocZ>=hmN3$X8+;#ihr|R#wW`=35mE}!`BuOrzZ}PoN5C9 
zn31y<3&%H?b2qg?(Ix|un=^=$z58m!=0-5^JpUY<>`OcTmoD8NoBjo(OeyZ;ANC1a z5w~0C+1Kw%Al)n8Q>uY9vll{9&a1@^z;aI0irY>Aa3HZ6H_=NN$?4;O%*D^e-7Qe> z^r{#KH4~awwofLE$thIGI33Fs*OR-n#mgL}6%=k%brDq|0LK+)nu`E9v5~uwl`Da0 zyVQ7AsX2D1NKEpe-*_6AV)}fei!Ee2V2>2hbF=%u*kYo}_;<)kYRYlto`l|0n!`+% zbm52EL6Pz}y-Q{bAG*jIR#6NpkPG$utl@vU zLtrR-G>}a1yeSP?6`4O!-yhoir(gNMbn*{_{{LAd-ZXgLrAOr&9>-NUo)rKL_v_|H zi&vIe^$)WGV0`9WU@>E??xztP0ODwS(PL(2R+sVk4e7r%;yE}`We;(BK|#Sx$=5pk z28WZygoFfUj=KEFn)2r6wA#91|OJY;e0{u)UW+J3%)}3RXuUD$ohTn;4^BKl(#<(?ACtv3c1X0#c=NCpN5Vo9VLH$x~!*^f%UYPtW)%kv{4 z430}kXnlP=LesFH07p|lUo@*gK|`yK6##IVk^cUms_pIV%0SNr-;kL2_?u}3nREak zZhM@mNZJr0krQY91nmOg17jR#%@Q;%HJ)#;Gcz+2_^5u#ELN)k25tjD>2Cn;m&X3x zlruh<5#Up}4+hQw^qKO@mk|DWr=9q(qdaeJ#diQqg_m0CIbW&s&Wl@be^t<>ANr$d zib_ZjVlYLjA%o|XN3~W}+1SO50@y5ozjW;kCB74pVbxSRy$@vtasY3F?nd2o{1ZSX z>TAD?nI1k*O3=I<8}a1ABD3gpf)z`}@&L|PZm`?av>RcnAqKG97v2a=7{8uSv^PKl z_te?l%zX82$`%@x0-p~~^5P%g#Ozdc^4Tecknl-v!#H^b5C2&4=0khW|E z{^*704~X|+#5f7*??Vp=^04gtS?`o4=nKz%2ONO&@nlsYBO_N%55saCd%d=s_2D>j zp4M;CGcasi0X#SQ1mOq&{_`%}SL^9QxnsUTUL)<*DFEGOQ4D(LUu>uITe7ySj4s#v zpSfJpY(kb!QvjcKe1FSj z1`(i|1VO)PF}>Y+LwtTGy1+7uOkTN>6YPTqPk_<|$B-X%*DwD$eCXB+#*HRs^mJ0E z)o5?RD0Oh~BU3nz_j59v-EO4EHKAfCB;+chA6hpAcP-&TR(_h(W^lBdcMBLU%;P|y z@9YuJ(^(r2fv|p3$B0k8x4yTUyn=$SsO^$PT@~NY#>Pf~Y+$L3H@kz~1f%#?@hOS} z?E)UxDfwj|0b>jg9T2#J<@k!2hHBYWQJ|atm9<9fG!!>;yoBW>g4$GfeGtAJ}nC%wP5dA#JPO!mdVF72}cw4F^VNbRu~ z%L>n8<1pJ>`|TZ!e+VSA2f!gAg-yckHJ78ZAkHTQhu15fH-^r_JWvX)KZ$0Ucl#c} z0p5>p3!@AKahOA}51-9xRTKdAi^d}ulZ-RBi-G+K8W;eJgSpo$jLd2{jP4f%;Bh3M zbzzZl5v<2~lN{pSJZX^eNo2CE@V2tNpDz>Jx{rL?5oK6Y=nbG-@kp%ceueOFsj6)U zppi`^OtFa>jd9(nH`p2|DjhzjpSiscW8Akb?l2sD)PiUDkpnhOt0_SmV{Z*jyct5A z4i1CbZq)7tY@=+BXOs;YjRKpN8^QiX-WA@N_B<=Q3Ku8%K(7?xdNY)UUJk}NP=HXR zl(Z-h$sPQa;!pdx3}tR3W&{|nTfN8MO%linOcFt=7>C{r*mBZ=MUH_*MH{9C4C%1q zChv436Zs*N=ttWEWFWDC2cdHoY!}LMo^T8Rx2bN2Q}JIlAHuENKp!Bz30&k@!UQ5Y zFCjmp5EUp$v-w93`|)4%R`Km+Tb8c++VZbNa6j&+S~ijc{%nuU5yS$2!W-obR{Oyg zq^~$EoxGXWy?P#YDOQkm0PM4JKBQtCUV{C&zcIOsoUU?xQceO$;4bY{$SmE2d8Yeq 
za0dMwVDvFMBUxQkh4M$2!a)%nEHS-N8ysCE-c!XN{$u@$7~ouS%$X^>5C;GoimCMP z00u(&=!``s@#!|9%vSK2w6pmvc$;{&bvyxdWI%R8ja}?2R>l9r4k#UO}8Iq+tcRa z=hNLVxD>$*Nx0BKA>4I{j+x1@`9yvoc>ePB593-H4?n*d0}G372({4?jMbl;sB)CF zN4Mc^A~LDNj3kqxYmiD7B_`8P9v$PA=PO1M6Fly34J;G`>#71DuqU4l4wy9TjHAG& zQBqQ(#OxAptE%swCMv+Idi(^{dbe_h@7NtUA1o^XLf_{Ro0^KX6A&Ok!s!tTCV3XJ zr<08=mv1nX5M3Ic$Kd(ucGxSKCy9un}ypQE1b%KX})66|DfBG@+FP^4+?os^U?zKWd$w8OT#(HJSq5qZ7|J~#e- zJoqu^L?p`19o`HM$0|7hG#}~jA4E62?D_VhB2FL{C~&vpi0gt7+!u}|V#8{klq(2} zy?G%suSc5+?{CUC%Ic|t{ecclE@;LpI#^{E{lTBqBHLuWy#uO?k(h+~LY69nDgwQ; ztI~-TStvLByxI0AKjH?TqKcPtnVJwefYF;%QTB9|<`>pdx(J9DPuqwEa=zWpwM{r= zFbL(sXIa*7d=QUBZQLN^CR=6-;%qn3n#|50#?b@8gjYesl4z2?C9D~t<3D8C4+(mI5ptN==z`h zU^A?4ZkMcb+wuMndv6_8Rrj_FE8QU7As`K+68?Z?i8d! zIwcj5`sU_&e((F9^El3V{`mepgE98l1K4ZNHP>A8zV7S3?rV+2Als!uS@~HwOBg5A z6Zqs8*6`2SQ9{t3N6(CgyBK{4z|}T%bacdGugq1FHb5kIr;H|xPRtcK&tl#c)*c#N zRo>*$J=$8?6+xq7SB@X)r5A9?Rl z*nehMU@IBJi&a>7oI%nZpMPP3$o=6wXdRmg%>fsm_?2ik#k|c*vPkDFPnaqH7^5eG zTPl`db)Q^LA~szXWzOR@nU=W7g|vl$fPgN6)cIG+S;JH2PE@wPaxvJ^!{k;iRc*21 zd|t?PGSo^7l5B}ROPT$WXh-^k{7*1DMfkhZsM(^gSMRcT#pkB-s;Uxev)g}M_WLk! 
zfTi_qqPqMe!^$GWuQ(m;59s)x(Bc2(lK)%)|NrHZ{|L|j?*jb4Df#o=`={R;4Pt9m zJ-+<;ZJAKe`-Q&G%@Zes&z?WG$4!>pa}RUG!;4Ew?p_~I9JX#PRaaNbKZ$a~$Hxar zg%`+V!_S>}dYWAJexLj89n>%P4~{OIitzIC-u(Va@pNTTp8TMdFEu5lCxUE{TT5Aa z+(*}V_zy*FB-)sSgt|RDul@Qve`#q{E~3=fSjT;b-ox6{^2x%{?fJuDLz}iW489-y zj_XCxF^r`19kvHzP%$n|=95@|D__j*H4r%eAz-%qRUV(ia$6@XG$gBI(Bq zc~JD`BG%N54~RWY9c0~-)vh*`)?ByG_`{6Y)WNYf9IcYM&20d-VW#12gW=QptRYh8 zqil3^^l*HZC8vtd(Sn-|8-toQpqdyRt01?wW1g{i_a<=ZZUxq;hp+zY+t`uB$CiTs zIK01|q)|r@^88`tB%7bdZcj=l7@1w`hK3v^ETRu#J*q9;($d0PaLCQWbD;^{dvp7Gti2U{2HU%+SAj+ zwfjO=R@T?|_Bt=`QW`Z(cG|p}SGusVG5MoG=16kq0DpIPx0HmfOoU+d8%#+8W-T7` z5xpm@Q{V1TAANMfsVI^I7jMDibZaO*H6cNMa&_SKyXaCioWoG=qURxFb@cJ+Sw&@K z@(KzWoL*e0I8_TuY1qtpraM$L=}Cqjb>&e%!*(ro(z2?mLZ^qb5l-W(HOs85?4mGf z?Ua%i7WA-(Q@Mrc=#p&KazP@TlfzIx%#DZGMD@=*Ss3{2N;n+)_({6DpN-Sa)#l%U z*}Pl2`VsvrDM>TU-QCHG7{;2^wVbnY z6P@TE8oHj$v~N%CN_!^5dwYSyb~9{d@kDt9R8-i(<9qDgXk|FYv#Zv6GM#>SAaTCe zwcH;2p2vSuWP-tSM0qQ)HdJHa@w2W-85R*8hM=b0$bH6G5)7Ny+q7(lp29f~m9^o+ zG@1Jm)oZ@npg3Q*?gS|g+Y}8|Rj131HooJ$I84EHrDB=i;cSf@w45w5pQNP`iMFEn ze&m&wR5T)}7s|i&jsIuFF-oO+YIXL3-0NT%rIS!$Rf6g#sn1Xq-(mIqjKf{LDiDPz z(Ee-Ol?3}_LzK{W)3cDSjrWD}sZkC71QwY- z=X|@{CYX>5ti%nfqj%KN3d|fVEIx-zSD#)ra6j&UBuPG1axS<&X)>CTiAGNPt!!AL zC{I;g-DO1tQI*I_*3A8U_cI&C`R*)g^P}Y4^~&LqkE(^ViQ*!lkjjVkqv?wLm)}W0 z)&=mdz$6YXs=M_Ji-xs49pD!BRDwaGvJd{Iea-HeLfAaroz%dfXSfMq&JlUX`*P0M9_FDN8^57jD zhfhyL*FZDNKi6G57tqqN-PKNx3a77BX*f*28u}25@31P}CZ}jfbYMfhV?^0_QY@79 zRL^n8jQ=SrG7KI)AK%po-`(44f2>Ec5Khjz!VFKyXqxAKRd2GgxJ>P%O@o`?0jS7# zSIZRd3U~l?E1fwFu`btbc>TyJUP@aMQN=--8m?bJ=#eD%;Nawp zj8<3QGkAIh`y$1i9J7~3`Ow7%Lm{>opd`#pU#Xz~ zii3YG>^wPKuVeu#P=Pq&s-HVCg&aGALPI&DcMt!WQUH=$77k1CvBTz4P_okb&5dtf zVB5mwH!0(+&$cw_>?S>{RA06`PIy8&w5~RQ41|^|(K*BKHW=*EKg*jBBiF-u9Asn| zEu$d2gqx;SakJ_?!5`w(RaA%zL%EJVXYsJ}g%*F}?UJmfjlq@ofCr-nK0X8k!lYpB#^@NV0 zxcK>zO~iADtFBM^rz3ZGJK91a8}F4h$Evx?=bzA$#l9yD$6(q; zb3YfLtk9|oz>7V;CiPl4X88D#reQlr)`AV8IPx^&9L#me1A&Vbp{JK)J^gWoUC4hR z8KsQopSX-W5KG?KAI> 
zrDYDpk%jPhNQDZ2$>6eoK_$nz)~hI-D5CVzEAAKe6AAH}+9z^p@mfl?WYUoMj-Z0b z=xXzcy#6BEs=UseFM!mB!yuJ81i=9ogXf*&qjV`SFHh0Yb3wi+?n}NYF&

-uh(x zQ`cO#oU^Fx&gW6z`sHJSui3P+iBGtU<6;#=M84>|EsABoA9G&uM~p-MSttmOozhP; zEf zQb7pE%w%n0rZb?2K;IjAibTkFwFg<0mJB~_)?;f?LGUu(xbtmQ-e|SipnEx_rW4TV zF+du?yTP5LKa%k!Fy!%?(Ze~Hdh$TQYkK7>5o=;jD-NdttyZ3i(lOJXC^Cw|*}Ocb zUNXOj7zf=jweH8*o9R>(;0kx{1C~Cc6X3b#`ec`>f`h6&v2p%T^5@vA6X||8x~UBM zwmH}If7oI_y?E2@GH#$%fq*l3i%8nn#5$hC>Og`+OGXn8okCp8I|$Qw3U z8~QW%AN-*(%sk=4QnIAmO~)Wq{Ldo%mj{RC7L)BS0Bgjr6(%0-P*|%yOy~eblab6! z${634U*k|5_tf^s#KR)Akc_Gk^Ss7Vjr?HLeapoVf=2dM;;VTZY_7RU9j}aM0h!C| zRlL*KA~ejB4)VAUqXEIF?_bVIK}y{x&4n@we5E)JP)#Owr}ii$yTireo+$R@#A)en zR-}7O&ze5-^Hqu82bi<3m%zZAVOlGuaK@Y;l|*!iV4i6GH-rKI=P=Op3m3T zN9F~FY%MsAO1fUm>$_>HSNL(coe^_vQzvFoh0Nr~r6z^QBK8Ms%ibkf-P~1tzU{GV zKJZ5@sGO?6uJFHJR~_Zx;_7Bds?c)y%HEsPY4jr0?TH%RR~FHCZtj1T?r zWi^RKt}XnI&SyX+(cvz(!yG|6^?a_)aYKfgy-2vqv(uL*eJx7JU+&rSnv1@@^oNHh z$8MT>M2c@k`~4N81-#0K`XVNd*p_HA>fwg~T6pwOig||;|L5VIM(xFswt42&NlC?HRlV2awA*+= zAo;q>Y~r-JK#G0;9g2597G`=}r9W(I+^_FrFzsIZI{%gKmXKx#>nGs%-&1`ePfX87 z#uT_llBJzIiqwZFKVrs-WOmxgPwJ+XAkozGz;nq(u3vKPjmZ|f`aF9Vgrksi@=ec4 zpSgo_Wq+!MfDhneQ~c5I$2w!}l`kmq+`+CVbZPo@N>~Dxxc{Ye$dnhk-zr0ag~*O3 zf!^2p<7QF%WG@z89cMqxH=C5ULC|(q{qmU1pr;IfU>m9(2T=AKft7Gq(?n1 zcV7_A(_Ju53&-fCB2+i74R2DU4US>+q$1$h%?9EJS?_E!1rcBLYb_zN!#zT{d^}P% zi~uSX4~{Dfw;l9tqfQ*A_h-Q-w`y6K32Z^_jjaf$#3C)HTs@DwekmegHXi)qvoOlp zdm`fpwvg+xOjhb^b9vh3%tmS%1dC7#KG zlxp#O!R_cvh=jkXZ-0JU@-r9WniZd<#H#84%6BZWuaYL`KuOrdP>Cs4m7&>YT zOB@Txp2l;!Ys_nIkJIu86FE6Ja$K@xYsZQDp0cq;a|TIE&IO|q*f|~;p1k;MoSjNP z3v$PZp`n?xYTo^iEfW(!?97$?=7w~OVf|Qn==qRK3ZEP6l!Ah^ z1ay(h#*={(ocuzbhnGq}(x$yH+;%3+I5|0w$1Py>l9RPj30Plxs(D-r1v9)7XEAIg zMMuH?kc$!?R`ehtuWCK{1FQ?|oM%hsh%vRp34!`Re}Dc6vu!$Ihwn;FTVo@wL)-j6 zvTP7qR>FlJ5QPQDJD|vas0O z9%9tq45oWAXHG-qPap6$Mm9BlMeenL3>3BdxzU2IYx3OZONFEG1O{rlx>v#QFFXH0 z*~GMKC}g#^wvPHH;>!0)1iUa`77J?uHQpxyiOqbOoh1U ztSZ$1X@GCwJ|VzFMn-egJNrA(3G=PT zbaItTjDGftCOp2Ag@EiUlgHBWEQ8<7O4;GKhw8`se+R2Q}KmoakF)8sp<8~ej@ktku0X=?3CtWXDuivwh&KMo1N_{6Ma-MoheX-SN)jhjr{z-104-J*Z+y=ILx>?@M=Ttkt z&7d_i?tC1GWNwKC=SjL4qJWF5Uf;y-ysb@vJ(H&34CkrmAO?RHkTTg)Kua$WdMO$> 
z@=%medZ$nXeteBF#fnEcFehcYww;PTuceC7IlfTUsTDp!lwbu0Tc>_tGv%UijjQI- zZ)%#&p?;-BJpyO;P?K3Lp(wEGV9$T}`$&$xV|%?=!k9TiULHp?Po*Qwd7cQB^8|eU zM~bpEQd~}6ww=iwJTNeGIo2X3bd>Ky-wT-OgSu+M64Cop$ZIMqrrZ=xOG`YV0LT_mqWs<(7d0tBG9V2M$>+q}_zOJaUoY{HXU)Z9rG$&uI zx)5GHg1mQ%(wU~rvK*5!xGO@V0#4L~hAjO#UlFCJo~JqnofVj4?N7Hu9&EOHy<$?y zddUJ-Q>9s0Z!>ts2LnY7bOqyuXpc@Q=@~yWvK?=&eyS1>!Su=dz)Zf2lh&NjDZ1_cByC#p9eYV0#_DlTr+6A}{C-oVEJ z%5@&_KimuMIFr>}nzXcJwCU4TiqewzKjxLOQLz}*0npb%|5%09&n>YEk0UYcn09P_ zY!2Q)LnD{u19bECK4q`7nZ2BFHlN=+=0Mbhh#|BpRq)d6)hk=ppc@&83mb|r$>z_-{MV?B^uaZN{$P!PFD$GBSKHFp zK7jRGV`8T-az-O#6EwF#oBW!9C=*EMY&H(+Jh8V6?x%(@*X90LwM+fG_vZthAToW3 z?FK4sx{^T1nG9s{Z0Tv$9HzjTQr;G+>x?lxJbY*fq%}P`naHeqEsF0`s7}4z;_g(* z{Qen8s#fd3!nP=L^JYw$&DHK$oCwnr>3PfZe9&kN{Zs=*f>q8hQL>~klj4iF&)+;x0 zxz$<{h>s4G@>`l>&nJ7~r)(v@$4XzGf^AokB2%YvcX<81i1J3{RN%8G@WFX%Who!{ z*yv4dM$+K(Ey1?$zk#RC!AKt#6WVCJP_WLXrTiXGZ9tMJZnD@hP;@&ld|(2>TmdyK zC8h1Por9xNUrecGu!|Xp4&d>S1{p-SqN|GGNtmPH9VaXI1;+)CiKDTYmJr|9 zNY7D|A_kL5zNTesPpUFG1&A)mk{FJw;owdPd6>$G|9VqfiP{&m;NcXOh=?v=Ul_$| zSF}&y_%QTsM!iZW>!CF5N3og?NQWd79UHxLegma*@Xw8GF-$no)S6c6Qj|TUvj%aq z9Us_`3=pofHsd~w??wt+MC6@KW(8avR8>QgaO&2SzZMraCQ>!5QRU?|NA_QG&1wVO0>P=)0ztPkQTvO?Xu607${@H_>9#1or$rg7xA{|oe843 zLbnW=AKIc+d7m%S(Ycjg7hSR{L1IDhZC*qtA3H*KRo15CHzC?A%$_Qn*V%-%&q|;7 zd>4CGxH*m6OA*MmZKj6^ap!UkH`S-vdAe)`^hDA4Gs+V?_~ro#3j@fd`t_`3j3KDL zy~_)O1l;j%`~nP@<0@@U@4Zgn6b2efN%6#v_K>1$s-5V9veeZcQ$IRU!}>@U7% zVR@p%RTrZMLd7lwqdA2OFMdp&Nlt(ptDU}F`J|M0 zN4?QIJNqW3`OU&yWlhSS`L<=dOO~arEY?F$XERk608R(=h0fIAf?p?WuHyID-i%FR zdkQeyBK<&0L&*Owrg*cvV(rqoyE!(FGLlWrAP`j|vMn`N?dmc{AhY^*NG$bRjj)keDQ{r-9b8exj-CDuc!4h0)*0IoRL>S#mwiyMr=(gVA3?XIDa^yM| zbYp_Jpv>tANe_R`wT6s{H|abFYp+>9WfRo$Bp&|Zs?k;d$8?c`RIIj*ZScx@>PK0jiihF`cQTD5({^eqLdy0$o#U8&O z%#RBa>W%A(c(=(O7yh6&-2@J`Bl})4a6f1+s!-_E({2!5KP*|xk8xZt)(#(>xD<0z zQd9Qfeg3IKl$i12xt&tqb!>b;Se{hZ?|m#i5d5AXg{_VR2$_RX_SjBse zLVy^I)}NNBoK7&87FIC#efD_(%BmX=ovRX(0-+N4qaN=gcsyNMC0wB%(LJt`*9?iN zSt=5yqwD0)>4OxSO8K3jFfQj60gJvC*pHq;wkB79hh<@nkT$@BoPZ*g!cPjF(LzS# 
z$nxnkFZ}u;k$mqo_TcAqwbWy4M=#*KFD*DiDG0yu$@?$=1!llCoJ&kp2#9(|!-fK$+#eGG1#7JVV0qE-=d(JCl6Q7yZ zW~97Adv*?!s{mMK+4t}H|Pc_v~5^q2j?wB)| z@TR2BHM?_Qxa~F`CF{$t#j}G0&nxkJy#ly4gTV^8&3cuEwj1ccOrl5s)-1%Ohlg}U z%I~;03%xvO{sVp!Xb&WFo$X+YsX;_@M*2J7KliQ>+BvYwvrSdhA`IHRZ56HSI^N)mfGa#W5Jvb$_85vM(F& z^~q;9-s|!OfI;wA4^rDhBbl=(2>G+W?P2fC(CbU{a0YooNo2rrb)Wjf-O}ENKw7eU z)bPK@d6I432n-pBFfnhp3His4Oy7}diy{97FW7aG?tv7U$X`*o>_2Zv=m}*Y z*`63X3C^GV`Q&NSZC7M#kqzPQ!#-N`A2H4df+t39U@Qif=&}rCzCAK8c7*GQb}onw z=6;Ml7Wc@I3=#l^AWhCXe1E^;QhFNiEc%laJm1C!mOEBgV`3Ii&A$L*WAoXOEzhq( zPb1+nF8K>MRF>RF{z*@|l6$y!e<SUSj zQ;{QxB~4~P_`zMOm;6345mX*#pKO7?o`j^N!n{0CgXQAjsO4=smb}Nq`hUQ~0Mfz= zgp7+wBO+zn)22t-H#aS2l*c%~QBqOC9&J~d*l{%v##?#}2Sk^kHgCP9@4EJRR1lQM ziNYgrHd;Vq(feG9!*P=eeb+^w!gMzQP-#9nIRW|&W_>7ZWY@VzX(3Tre_`V3|GYuR zMf(r(OKmybHQ;NOnt0f8eR-Noqy@m>UVP;z-Jk~sbw0lq4*{k$I*Q3&>xsM@Hfvk7 zzk_`b=_eJ~8ossv2`DPVAJ=<%{c%Fq+iOsf1}KEF>a;YZ=Rjwc(DlDEkpBgnqo0vV zAkvai2DO6pqw#K+HKa*l;)`QUTG|jAGi~}0$7fojEK-yL0yp+o#pKj*`mW-_IGSI%-!F3hOWHBM(;^TwLb_HP49zFBBOwdfW zSJ&51*BXPx6m$kOIIB)AJ>U^OL2uj6^84FXVK2S(uLb>A=2GLI(Yc+K2OBh24>Ce@ zP42}g+Vn-x0RnKkU!iIRP$4ktpe558xTs=i=)&mBtgJ-~&DM|J0>=(u8PIr{sC%_v z@0YrIa*F!vM;)eW?c9E}5F^0xPbL1CBZ2hFKOj2~r+6#IK$@;~ez%nb@cjeat;gta z$l2$^DkA z@Ke5E{F_4S*E!4l+I)Oev0K zstL>)ATHT<7NKga>jubKgzBR@FP?mz2$k%hW+I1Lp5_LExN?2R?~|;Uv)zTJ#Gb5} z_yL;+_~4j^%2sCTBbbTMNlJj@_KcMl8?X~OH9}m2uA3(IQ#!H&G&DBG*8)U&GItzA zsK7Te5|?Pg83=1k;p2Zz0+G|K%-2PA>}O0(lchYWK4%f7+VNOayX6?<3=XYT?PDwL zh0bE=AYYo8q>87Wv>5GKKm%umEwW5ohOp5xRgfVw6!-G*GNoGc0;-J1NmTOkq#$KX zIpa*+KS`-c*h3X>Sy{=cIAwZtb-BkNJJ3?jM%&|+ysXcCC^Wg@A$P~_`L3r6ahU7< zz9B!AEp)W174|Cr(76PPaK%;Wg;K>NKl6~j{OB*#xTd9}KdDFIhc=g$#93y!l;|lh zA<;wbwf{)y_4V@XMfBZ8G}L25H^4-f=Exq~C_N2G5`ON~-0^N@jKWs-m3{oU(`pFw9dEySdHZv*NOW zD61iexvolms}mf)I*7q?{fZztC{Pwr4mz5DLLanj?@>o9LY0Zn0uNZ#u9i!z54tV< z^j6|#TumbPsH{(3gzAhFx%l<>2Y%&>F7`CkwA3vgWZCXgTrgAQN>axE1MXQh(oEKvUA(C zD3lFps}WB>x;XG}CxM)0QM`>haSjUhchOZ#NmM3tMc(l0FJVG1BM~*bo^(i^MIw`1 
zJB=Ges+PN7D%AC#Or=+*creUk#0|oQ-r5~ah1|A>hksCQA?4+$HSGO<{SKWhaCJu7 zL%OzUvb{vtO3V+jN8E{h%bp$znLLT8`?w|zEs*Nds3jtRm&9|x5QJo2lT-ke>LTb; zoW~}bf9&s<*$#Y46oGopY4l26N5?asMBoSL8JlyeFo=vK(UX3QPO$`PS0#1d_R%!zjZvFR*&C|WG@h_p80JZQPTRCwXF53Bdc<_#rX8yEqaes#y zC)1?g3F<^mQNpiYO;`1%zrM7u=rgEetcnLxrE30@RQq4iZaJrEJ|m5VX`Y^Tb_Ojq z!*8Kmwn^=OkXD_uo@{R&6odTz)izm1|6v|1oklnA3!;zwOG|C0dw37dp4U`Yf3U&( zoRa!YaejT7I$H48dYq#19JMHY2)W046;MKLTy7cKgk=2N$iR1QNDf*PTKnKKe12S# zxh{B|3;g0g$`V^W&u?qwe;Gis6#-OZ+gW13u%a3S*uyuln5{dTy3i^tRRM@`fJ6De zM>^LZZTx_o^L~$P`s$T@8i(a-7y6RZ=1^qv`sOAE@!59}nXW+dtzz=CIv;@$`hN>q zE`AK5LkoKP_k7U5!W;UYh~}G6#_#3729d{$iTAzqP4V|Y`M)mT{{gKwoned&0gkZz zN&95yw!41bQBx`#IR zR9=%a*O(I_8L-8GQi8L^KQ2lm;AqAK9}e*Xo2OCGunmlyj`wRupU}~Lo1T86@X5-? zW~jfP=&8{IE6j@VTuEpz`VGHOP454sFaJ*`aXIv3lUWLsTDXxZN8~aE$=JDn4f zK*cZ9ydeqccG6-D3Dpc8NgFnsUsxDgD3OkPQdQN+k-6auy`KLjq3}-Rc21-P>@?@Y zQoau!bKFPHE@WIuZU6E%9oe*NU0?$rXM^eEAtEB8@Vh<^XKUrL7^A%A)twisxM#|9 z|L@7fV5`PUZqO8v1q6(X;x}+vh^IBSofgzy; z6fqHkhnV$rlnjaI(&iH10AsAM0AGaC{}>8wRrczD_# zF8TMtlnq*v%#M5Y4R?e|0V7PNyl-MN4%VWn8(lAs}%_%kRG9zUzspnM2 zx$2(kb}B}aM%(@6ENEoXB%0jtb-`WU*&dW4;ssd`Ef&#i%|FSn}2wbDlOnl!qT z=wV4pr$jp_N{v*Fg-ei*u$fADjAXhij!Dq5n=7_L-ywxZ0NTHS&MkF z){^ALlErT2t#wzh)Rfd!Z7miZ_riFc0%WqO6686v3q{PF(0|J*iO{R5%X5lQC39Y0 z!=a#Le~ahT$hC$S$K#3z*xd;^v>&FT`bzNM5j?e|r8#aTuLYSzA zkq^F!|J=|k4@~K-I3JrdGE+C0Oi9dAnRW}>oEt+$vSrW~I)jVQh_KUJP!TH+Nt+tB z))lSb-2-HDKvt6jaeOeQ0J?RGXvNgE0}V$Im9P4WluY#A3nyUPh!G(1G9`}K5d*8* zmj+P9jP#!&^-Ya}XB-v=%@Fr(%ksf8^b(SnF4875wDGctZoPopl6Uf1wV$&!;d9Q4 zRwVK5ovz~uy48s2WJ<9TtLSlHrZzH>RsEZRN+u=n-`AT~!-Jf4eUA`HD~Ez6rHaFS z|F+t-#|wZX z+n=?Z05g)?o2zIN?s96(p5H?{Q2g-UDB_;~a$rfHj#|-yl;^MMK>h~_Bv{kj$jWFj zlEzzK-wL{cS;>Eps-d9vOH^I!s;G!EYlHwpkk`PUKxii~4rsaq9cAUxQc{Sw^b8C# z-1}^iZ63i`@SJW9`>lr3EqR{~PfV!fr49`ZfpmBmni&KCzYOn_ejwVz+b|E0ugiZ1 zt%Clmn5V*Rov8^l`m8tOzzgB-cKPlKn$)D)HGKqH)IiSfQ5P3$(R4B2fZ-u*7wIoO z;H@}Le!Br01U~4dnwA=4Z`IVXD9;xZ`|eM}IXXF3?NP2h9WHKaN-5NfB<2je)VOZ8 znkp_E$bmKgw}Hob?@F67I%%_ZQX%{-4hZmDva+%QSyu5cwuJ6@{SALlBmr{?Pdnei 
zfq`G34_+(ma!1jx$#G)hW>Qn4lt;~9oig%~AqupJ4K1Jkk2)&6*x%((g`}k~K3vI8vf;T;Vqb!O>9q#*wX3@aFT)i9cclyQh74Y}q6rjvf! zt3Vq@oPYPjt!*$j^Umz|7#yHyyX2M~U_kSjmcy!9t`;w%bjYQK1BQj*8O)~X@CWw5 z1NXbF)@)4{;}gh6JZ+3gTCDE*qOcdD(d ze9Ft4ESXsEvFZ_%*Qwg%etmYbF*rFn$)`{B>`O+K&1`jTZEe|b>Ntilrc1zsf@gY= z)6HSA@-K;jvLUq2Pc!QJ!<<5q-`cvm{-!RR23{N{WDEl#c-G(!}j-? zU*p^8e>-6ipYVEhct?9EhL&I)2IR(<^jCaSSKzkK4Sp|uHztNjTfoe~(0G1l0B~w( z2zEX2T>?ALN8s;sR@sTMMy0(i3M^|DrZf!&yf2REwGnC4fwLXdf~0h*25PJe>_&m% z&>$7KDENcw^U~MTpYm<_ zdF9GFjJQnuu%;)7cwQSJDt;IdSqT#QSv%?VW6|sDHoF6coI=a?sw^gCdu-6pt9y1U zt^V#I;-b%}j~rY$jtyu*_;**a=GYc6j)8+4P@@V)^gGA;c?BlcmZJ`-Yhw>n8?I1> zei}$u8%Q~%SFHwP6Kj`WF%3E1Q<*z^{1CyW#XRFokS8j`fr?KNPzxRD{?^;k}ME0Sp?!%qGI17^~Qb;ai% zv{K8-0+$_`zO*2qF90g-AD^$ty`o5Y8CNCXp}lUM(L9U{mJhv(+CipUzoyW9=5;#k z+Wv33PN0UV_~Ak?B;*IJ(tbrld!G87 ziYnsKd(T_oUL!@n-+SkD(og3H@;Zl225Nv?$kDDrt_s`RO2_03xCbjlOJWH-cXe>M&;4SSGS(0kqYisBtye8Y1G zUFiA$o0>%^x89;pDy_v)5#>vxH=#*&(l)r~g?)mAMpA*Ah`kD{%Ae-nJA*EfR$$gB z!H6B(5THDE#cX2f|(fPmOPWsO=Dce_uQ2~Z#nEy@J}PGSyN} zdcrC!RFCb9X15Y;t1j_On`{P(Wsa9I8B1eU!T3+==wU4mW2h`37GgA z{k#B9T)tm1MYZ-Z{d(LG13WZQOydQm0t4hmx%YSbIc8!tui)@Fo|wC;NY7Y4wb;lD z_+6c)vZO#XY>74ZXxqWWQ4o}_Uq4|@2h6JQ?hGKuvs4;*L2l{E$GH#ljMWVTXjaEg zUk&uFz73Q~Y8ox)7+Rs7a<=sBG2kd_d_FfvLg&g2y#{nZo1vo9cpWW}Y(gO;qX*CP6P zImPcOSZ$E`0}d9Oqyr~+wNX=3-$fm|bMK9?`5vRHVZh{)&rHCbj*7dT&0>ae$$UQcNl*uf}<20w}~PaRjyiDz!GhT!p%RHNK7Cmf{It zy`FUkw&6J`P@`=VkF8;O_BG$Mc|T@|^2;>-+Q;k^_07}|6?Dk$f@@H>rkW31Q{&^i zw1+)c-f^|KpAci_>r}mjXQDe*W7be0=eE-$k**9Z+q_QZvBZ{rjOl*HYiJ^LL7~d#_;5>~DjxtMR z6l3Nhi*gZBv@ji^GY&WmfrJub4enq^YlC^Hw8pY%<-S}XvX_kGUS_ZukIi!?OvwQ^ z0+jPZIIlW`B@oNo<1!) 
z`jF9;Yx5iyo+t>eL8FqjmUo~RYJIInYAMpC0vD&m6%~2@^ol`2?lWyIDtbwdH(nQuT|%pd2tFI1hoz}tK>g!aybf-pmt5hl9nT8^OUNFh_jzSM8WDpI^$<|7j)Xa$Dw;@L3A zTl?)X1Mg>_2a4jy7sxYd5#l}rIvgRlDsp&naA@NoBoq-l;tT<5dN@oxziV_9q6~pJ z653lJx9`rBkt|Cg1KO-Op%OsSAw|6elAtm8iIP*%#^ahG1p>9PwAGD;rwGADkbzd=lvFH*3g(DLk z`{N@rc=3&|_`O18^~sTAUpSuh97S>w-uDc=xr=CDCWQ73Y{xtTEd$+&M0KHFkk(-~ z)4YT+U$U*`P9n_hHC!4WrR}-?l0u`wv3mWw279Wz6C0DL|CL7Dkc7E*a*o+TtyQrO+ns3Ou zt7~gt4i6?BP?2qp41mWA-yST#G%<-hJAiM!J{iokI(Ewod@l#g64m(i-c2zbgyM{r zdH|)q7x2~$Pv`#&sKdS1xuTp2$-0s-WX1%#lA=5$rqMeZ z{5f3-%^$|kzIe#^vONYkE6&-L>I0u^)dD$Xaz|fa9}ak@xdcwhRmNHR!7TRD{@>ss z9u%wEiQQ3}7rYMF*|AodrFXZ7cRl@O0~ChBjQ6&hh$_jbbtDBRade6rj~mQ>j1~9V zJ;9#s=WI`JFB|Di*6ry31P6~8O)fC)T`36_LA(5pIP9Zh{qcKpl%LORfWu^w;{4LO zqK)(QSj_Hjn|AyxK8=)Q45$K9l8Hb9oVStwJ}pyONvyxmv7U-Bz}eXYV+Vd`MxnXM z6YUd9m{Ggp5I!d;nkgUK@I=)-Y96i)cYYeQJy{qX3$SAaEFzCY1{D<*qrfu41aPD^ z{uvi$KI$;!OX{aZUBH7qZxb9G{Bg)wgfXO|_4fe%h=WmY6xrS3-HlKlFng;T8j|Y` z-6*vXwtN6NVfL06pOeU~%fdaWqRI9#$KA9|y;F?^QpyP6C~ZYM!m=d9VupPd>V9MV@B*-ER5y zBu5vYX5OCBC3JrMs5);Qtj=!`_|-rkbn)|`*Fuh3LyQMDCo$Y;i@JcF_BeGRQ=aGOR=YLJKHCu6Q~mo{P25W z=2TBFa5UOY0S9iCN=HGFd3QZ}2lv*fBA|EW*PI!_&ey3aqE-h4Omdr24ofzi)md_V ze*!S@&hkl=9VJBk*Hs&dahbpzcXMOoQ;yvl`sS8A5|h1apU6Q{P6 zWR*igHc5E_@wUphp4~AkK->LfO=(X9K7bC!&q7GuM z@UcT$TH30cXk*n^^72!i{1bM4O;|luE}UM32)=>cHMFzlOmu>OzX}^Oahk5Qr-4xe zuG+&ll{zxPe#ha4E5AEMIOoJ5rJA0g2Soh|X!$f%R#x^(Zr9?nU$SWsk?0bJydwo{ zf#L0j;X6||$7uuJH?k}&EFekt19r@k(d0nhW}QcK2ek$+eq!^DoRDzFK>i)#bs1q>aAvFQ9h-1BO7Ti6SRqcwZQxTQTCQ$QMO&%ut*6K(v1iL z(p>|HNH@}rba$r$lG5EFAl=${60 zT^`0F{v5x_QthTt&+>{23)0SSN#ASqM6#hNQD{v@UZDOpEomV*^az^RtjXs-OK$mP znB45VpdKwe$H2f4jz(J9>{;G*CW6Oib>^c!%ggXK=SmWGHbX5Bn+J9gRnS3gvkzmM zNW*KsiHlSz1|A$ydZD=Bmjy>KiQr%76#YM5%|AgAgf#L6e)DxCo!mTr8N1N-?j5|` z2mia0Ju?>;5MKH*4qsD4xB^&y}t(4h{*?s5ZqQ3Rg_kILH~` zpdgXFIz5&5_U4va2N=wU4~tChPAfm;fb zf__luC5&skt4p}1338!FezLb0Y!!X}NGAE7rUmcZmVUm-&)y#04yqghv8PM$_9HO* zo#k}kk-)YblverI)3-}94;4_`PjdV)9nl3L+WJ(x8k9>*OT!3ka{L!cf#11bLHZt= 
zpcKN&erZB`=`g!O?A)cF5Aa14Io_^~IuqdNg`4a}-g~?302(pfmzq{J$zr@Ox2{e) zl_!_c>h)7!O6cVnwbSK&Ez#!YW=jTN+ZiEu5leB2221%{z%&r-j9$O;2H<6%?l?8Q zo6fsmPPnU%v(6G@I2`3x3$=hXnX9?j|1f;NivEuW^WKNAtxd|5i;vqt^Q4fl@&U7h z=@OFdL;O85v<$teIE||u``RBp%4mFnpTb<}a)hX)+vQTZVMC(_OGv+^T#JW#vDtNs1>A1(dX z;W6rIs7x|1W89`_p=WY}klo)Se}9$ybpkX{!#X9YA~f--yd0hX&lYPmbS+}yL)2I@W>9+yCBd3&vUccF{w?JI%Onou#T zH|m(9CSbHVka`1DkT$qOLA3y0vzaOww5vD57wh zA9X&f?b@e}@oFlJ??dB=OivHbLL<_4N~~mi{)7(PeWBg|xV4Se;^>>B{Z%VZC2@6X zprO^~Vkt9@0bFW$`>Sbq)1>cgiJ4R&;THy1#bhLS*5i!NJG@`HHmt!0R%o{xEJt30qfKuDooRIK3zm4hK0e6WdQ+Vt^44mjov`pk%tHjU^H2;#RenU_n(6G;) zg>%+kzEQcxE(`1F(}=lIAa&{cz#owx+SVpOq{;hYo)+u-oIiW}5sZgY20-+IeAJiG zE)-??JIxQ9cNXU65ap)(utSgRf7{cMzb{PL6pwbs+R6~V*nRaWt(gKUcLjp+_onS` zsO`Vvd9U>DYGnN{G@`<{*Qv^y%iBNImz~c~o!{P}y0KUVDb7KUSsboaqNwn}?R?E)q|5BF<;yF(4>yA_@TzR^u>@P2tPv z?W4?=+Pvt($`*Ses|}K_`YpI@b3`Odxp9njIyUsryGsaXI7vDU?ssb6h9kuDhpJ27 z`z0Fey`)iYK*O9(*FPsxj<;uB8oiA5!*9Fki(D7Je|OrzH@ae`sxX%)e$g9rthMHi z4q(j?`V5kXS3Dt zs(N3mWNLje2`E5Nz8r0talI^{?PZ|uJ1)2+Cvfiw!J6l_;PMvha1;F?ld zEkRS$@@5kG5!BB6n4U%iHccMZ$H(X8h}Z0Tv*OgqNyVYB?NU_8F)4l7)ulxz zDodH`TdGQ09wnw~ln*AklakEs1y_n=ilRgI%TjXfBI`M)?xY%c4RX7!y;FjvZT4kL z zYin(msR`OgwFMqTUGaHbOP15L{uS>@@$$aAj^;D=O%S{54=A|Ro1yaCY6Orj?dS;# z5fn}+JmfDeZ`E?yEohpn3u{9O<(6uJHbF6dX!I7B+$4X;XTzc097on4s z-t(y=V66+;CM@9%l#+086mx_49>0)tKjva+5s8Ox6xNGsD8w<;r0siMlq!k3Fmtf? 
zcv#9vwM;YU^UOR>PFS$NOeG%%^imjl-5F}eVl*N5N0p_cEV=S@#3Zu)ds5QvF z;ol4;)Ce;gBC>}0&`?zZ34Kz*H!AUJHo8Z$Bry1y)Pr9JgC^m2FNYqf74R4)vgMh& z3|}b=rE(ccwM}~SM1CykaSyZ8EkY;U!dw-!Yj|}Mz}5A}RYEBDOp)#tlb^6t z+%ee&Hfj(8v1w*yC8i?r!jy6s=)Sow*Y9)qOMtS~5kLanuWR&GqkFs!gEprUh@~&a zD#eUqzxB^(zUn$Bpl7e1mXW7Sz|}EY3iMY^Jo6JmuLrlD%^Um)WMdW$RWZ-KOvbZZ zhc;ZDG#9HiW%+TZlBW4Bf7K>-JT7%yoxPG*e97Ae>aMwU0r~l0ub_7SQg`jHB8>Lb z#nn;XWo$-%5wh_PSHlHOiiF{D8GAf)Q{yS^g1^6NR`TOAHI3te4m!ICOO3{#Z}D_+ zmgYL>mJpN2)sH>baM=6%Wa!#IKE*AkZ-Yj&Vy^Ue6vYrE7_CkH%7~N<{^T?A*(Wg( zU+ZAD@fKmvay7 zxb=66?)JW{as-coSBHm(u+Y%gv3gQO|15XI(341_1y})02 z|2pmK)?%TGuj6DOSJP8Cpj{l3OCg-|zlcAnOG0!tEn zBaic8;tw;=4AAA1oBJ9>%p}H60$9I;Xg?>;8M$T3LjM}{^Ga2{(gRUj(x&9+NP!9^te#(Nw_%T<%7(}eUz z!BU_cNVv4k;}8nvG*ghPpR(Q40RG5 zT28hMEaX@m3|Gw`WD;P_;eI1!lgE04d@xuIAT!o|Fd=hiAq%gZo1;m}414J|;MF3- zr^RW-#iX>KpOV&6)00)(=EV5-bQC-8n(F8JVRlJU7l65GD=X^(OG(sC5Tdb9UO|@jb3P-N zdAyx}`jxCKTDrq&z5%y1l{5+h-&MWg(c$kr4CU`|a~uOeeAiG6T}XU5+3y|v`-gR@ zykY+Ks)|mb$nZ;HB{*T&G-X&zl~#`{>$l;c(672WfvnTDvx9?Xg?=|+|AFF*SGoK5 zM}N47ctBKcL`8T1c_VX$@r~LNc)XrYT-1-4WFTn;CZ$v=kQegzxzf(dHq4K4;orYZ z>8TYvLX@JE9|}UQ8pNdRKKXd$`2?K?aac@u_4FivY_E}(mL4ipE}H$eUs^x+g~&lz z*wD$TrUAQy{yHQ)+|I$_>hcoQ2Y#NU{IDqFei)fak$xw~f#GC-K@#8@|9WL5&1#6{ zKuGf_*6&Nwn#?Fl2w8%Y&1Guzuc%8j~4 z%=ZC#r<;ez0z3^VDJg&un;ddF^-BRkf9ZPZR;2J*_U`zMI=#aTU1T-PZLuCywyhHL z^zB_FpQ@n{ub;6C)(Cl|lCrdQz-jac4Sk=il&M$T~9u99R3c+gE}6^ z-6wUaagSV9FOL@N${lv<)&VrJ?7g#eFoQKDA}k`pe|a7Ab(?85UG>b#>Efy9wcW`} zFOY@|(G2U|pBMPq0R8;0Y52}itKt2B{tLJ;gjLE&c+4l>yK^3AL^o}@P|@^hJ-sQH zfsSsa*5Rr>D|?lJS~jDi#d(gWK?wnx`zrqImrR>r)eeKC!-$eZWO#`_uQJMlw$)r^ z_sYu3D>^0g4lHbJVZK|dk+n0Ey;eN0wbgs1S>*Y3q3T`KitJ+5o&2k}r{f!WJ+g_#IX_TI zk5ODOa~x*M0Q~R-vAftw|6LJjstq*#3$+GP>s6jPBA{Bz@R=Fqdv?tVohF$Op`AxaMbKag;W4+$Q0UFX>evOPL#dO5%C7wekK2egP2y zLvL-q%B%>&6-T1A%5d5&6u6H37+y#;O({E%%BDs@vDTL2a2R=rXki!|L`G7z57A3Z zE6L;<@l*@U)w}}&vX|!{#qNF>jVc$-&}%erGG*LdT)n$C09qR_ug>x{3XsBUFQ8<2 z9s7U9VLPD8i1_8UrJY>=zx7Q*#M|r1PImaUS11@#u 
z2Mb6c)!lx)`@7+7CC(W*kmMC=nmbwR8^y8+U%x$akVL-j3>pDuOd~(5i!P*04$?AYuGe4~j)B0i8q>NTa5R(=!_D zU`Bm@5Xoo&@_X0rdUY!L6JE~DE-Jr|NQGrgNiNs%en84_<7j8`{Oe1sR-%Bn8WG!zUgcw)@}}*dPsZr==b{ZHyq% zOcS}|udWYVh`P;3t83=x)>-(E1Fee|1w#R}$D_6Jiz`Qq4a`!)6CndWQQBmSYEr5I zdKl8x#*O-CBV<4!Q@^V3;iCD50YG^cYtng1vHu_&qh6oT^`NAlz45VSQ7upp<=O7AEdo`N;?4wRAv$)3Aww=2!bLrbSW3f zoy74A?bLD`7?O0mc8j?h!$=GgIUt|06@YzHBmCPInM*0cBwjEuMd*HmOujbtG8xni z8U8#7?;hcT|7ZfSWP5DmRS;ND$_{DAV}1yZDI2eWcf-eGk2CPyqA5)ns3xXy^?*Dy zXMD?@#3u!7_7m?n_C-tm7vxuJz4@8Pg zp~N!Mr6P*Fidsgs!4B&fBr5uQDOm$cWM3K?Nj!zN?N@PGGbg6Sv9U+%iJT52@3SZn zBJlY=ZuOZm1}(QHvPx|gJ;#S*Jh$n!P~H;od49{vroQgSAjUMO)X~&*Iqj<`Sn(3D zo$Uh`rA*S18?HwxY2__>T#3aS0od&fn@KYQ;^qk}u-TL;L%NDpTa_ zB=$DV*d*BAjVoxqSdhskFNs7uuvRHs;ND+Ay3YQ*d&KLUkihHyNUXc7RISB&vOf5Q zQCFhf$9P6`6y#cl(#FQ^^EeB-~8p@7J_?2Ca4*aKQ1#|!{My{4Se=@ z&_R6v3O?`otc37uXq3OI6>%QOpmt_73245rgm}mXfo!y#;*75+l^1K&LV_UpBN1Ih zjvIkM%|mK7#+_+gi(vfVZop0V(v2Y-Zd@0zAq7iJm<;>G+kQJ_dvCMewh#YJt%s(U zR}j|F{nA>-#-m+2lz_!rZZ^$)x37YhpobGXr^D+9I*-FKd3CS&qR#4c$%0O+cUV}7 z{0JlTOW5eO#6-dO9)^Vy<3-W*@^Fp=t~cKzF$|z=#BO68k>Lp^TUdFcv^GQhKT(w8 zUvEBW^Mx+9{>qNxfjRH}bsoJ{lZ{5e5QwutQZm=<`6mb3Nol#)@dV`0V5=r3LzpUB*n&T zbe)sTeVQB-J8AqzPEcN_ znsB}hC$l{b;V_$bAtPxPD$`mrFN7QVuD3<_ZOHi8TeZwfz~xyK@ngKZvCR*^;$Vjz z(-qIo&H#1r=Gc+bYF@ZIIwoeOR2wuDSN5x`m)8_W>)I~J^RS$2jPtyv*KJ+TP0$Ui zS!52Aky-KLI_OT&;qlt0hbsULg5lNMuD1;Em>PLrNZp5rJyd_O8pf4>OBH^ntlj*T z7g{>Sj>NX1m$D%0^82o;%Hu@+xGXjf&B4=By!L2uV)EOSkqQXWA$I#HmLICBw&`t~ z1+ZTPj2?R-iVuaWx_yVI4Y0`-J#^siW-+Z1az2byjD0bf3sqbEQw&t zOe;z`ck}TYNilDOt|#cE(FYJ(VZf`TUKMauI+7t!7KwKGR3 ze=+%L5Z}LERcC=L!Ikx%&p#Y#ZdBRS7K8xVlZ|N;8@A*FvBTLKyoc|7B;U%52b!DN zV14!VB|lW?Gucl4PD$x}8Qn8ta@;|=it$SLjh$T}d;y$u6x#9ozPyumy|<{n-%;?P zjVCxB20Y@%;MNF$hx1pHnUxwGIE5FcDrPDwy-krz2bmxitYYKlu~}TYZ%~6h!+VCh z{LOSPM4U&BSUH`q3uo(Hi>Pd+zL9WaSNkKH6)9#m3=Pq*Mk{;b{UX4`1PnE9sG;dN zDBge#&0;vkm5->at2?3`GpCtDKnyI_ZPWYpBMg+iHntUexk#Fxu0y9%I$vqDaB*-( z5>I(QE(X|DtvPG>+QFTQ=awzD*fw6dSTQ1_yG0dljs%7`HP<|KBUSi0@E?xpY3;wIzHxm(4M4>F*ZbK&MtsdSDS`To&*4Ab)M 
z>ho=K%T6J&@GNbM=^Dlz37)c8IHZeuORg>!-%?V^_U_%f}JPtCGBmYB+IR$hvuafRZ5 zCX9eFeQCG;p~Rjv6Z22v>xXdv?;0l{DL)$e^CKhQi;)lg6B4ynJJxiXL6^dZ zOOPxteWN|U z4ptOj6Ba+>aHbX)+j^@UFBtYBSM}~0fC2)UcnXw@iXq?Sq#a{v!Wp0dZ<{^a=!M_o zW_xj}Gdx}75wnykVVQ&Uha$b+lYSM6rUB5X`(xzaP`&%(z4~73lIHd!$dRUjRfr`~J|i^ee4RAmsNxM~&$uC|Xq8^04A;} z^(FxzaH-4i+*^GxR~j4AmHJur?hAIWwJusTfq3sykyDfHLIe&JDI!##A5X`9QJN@x zL9*gSF;49_B5aoyr{T#ovNR}^b>SwMhJjAE+R@*hUq-zoUdk`fAR3zhXJeKjdT9}2 zm6MP#1MG_p(kkMCzw%CVH^85q?wMa^nfI$!@6jI`@*a1(4=etrR14#bK`OSAfj9nB zJ;X&P5y#o~Jdcco8plYMN_@wAdEqL*IB}MHTJ#IzA|D?K_Lt=W=_4M`U!{cou^yYS zff0l(wJFWz>oX8Xg{e z@F2)U-iA2mDn0jbeS>?@OirFn{G9C>$C0+sn#ZMF29Ma0v1}IoyA(DXJ4y58rESLs@n&+H+442ew;KLC{6GR43Svfo zAR0+WnQVN2nv;oX@;#Oi+V%%&%A)vNtd#rb6+lD*BH7hE%>VYY@{xbj3e|Q)gIjiw zY!}MLSkwlZC8ojzTjIDwqRVqtag*CmMrc&c%rH4>pEt?EAV6gG7QM>uE2lj^kFzhV zil2vGJX4hAU~L*&Uql0%bCj&RI|5N#PgFCFb8>fNaVqKYv9YX_brA`Zqi@Qk8hVI?g+@~(u8s@!nHP5D$AF?0K z*I0la1}_j13^ zYld{vsFYR@I;vit_{|Ful}*;JC1}mmW)0{2tj@$j-MhsW7Xdy*{;HDZC{0y#Acjff zbwNw4vRdG5Me6P8nKwW8;Zpo}Jn4T>);YoLof?ku!|m~%sxPOhzNfjHCTU7qLMMbs zs(a0DC;3s1)?rChIey=7Y=ltwyoN}hUOlZwSFp|T8%FZQV{9N&ID*H9enGXj)=A;W z>Mt;}Y!{|UskOARtloH*#E|E5JZJLdWhYjQ;{4zW?J^_uS=fs5J6h?QEHxGK8)0pe z5pBaXLZt8vzTr~kTC-4mAVFINzk^0uclr1cgLQ{ZRU>Z6Z0|Z#GKZZaLrNBtZp+BD zpQETK6@fi>gd_6fkM)_GHV4fw=y*+1jfC75<%xz55nveU;>7{B19~6Vdc-(IBiw7E zI`J7)z@Z1g^#7(wo4$S>8O{hw@sg#AJv+zS@_A-Iun5&?M0w{ay$qZGYzk%_*sGrANoiibvY zGq%Vf_Ukz$E?uPQ%csPcL+N!ud&)dU3jH#&#rrYOh}H3`+}VPd1Z%;hU0!yrx4yBl z$Bu@PEUmKAe#_NuHs9vV*dR`z%lJoeLu`0~w4qWpN2xeV)WM%{+bwOoX(oyW`!J4+t>7;9wwO`r;$_jBdKvrbMUgcEV)DSR(c-stx*Hq>#7X zT2}#E>*3T>g=+KRfcNchtG|${rPLjD`34Z8k??SGmO9nZ^ghLlL?h6r`VbfH%Pbb2 z(ecj7-T1Re?v9w(LSqQ(({2xRV&6^tT;0cylnRO1yw54Q(52(XM*sY9e z=G(JDd39X<>5Celz-S)Ldyu_kq2Z+t93;dhE_{y(7r`!SttsZ8ixxQb`Wt{qaY(!`kSR?h21YdW>LUz+#k+=vsxhLy#MhpO!^*m2V9-rCl}TI z_jbN{38Rrw21$Jm?J%d>opone$#2w^jg;eGLk@|K*S3V*F|VGo6Gt0ZdM~-G%1`Um z?BMp$a&g1vJmk(s-tvG}w|T76yr4Jaaxe-L>Q)Qkus^(Bj!)~tP%pnmY=+Y`+P%($ 
z&FPGNO3UijL$6FQ7+s2i-;*B_mcf@+QcygkdKkY&2=$q|)@gkWOB0TqIXk{VB{-yH z1OR)t?L~Mx1I#CIha!}#9=kymvtnT34ddJsMK4G&We9jg|pmfeSDu)2%r zg=Tf$&F~Z4k?U(Wb|uVIpV;>@!~4eBPk`qq+w3I}5ioaGZHxL@FRc$u@H+_I<6{Yx zum*!rv|M*H2^2!irB zY+!{;ad!6@%DEYHA*w~zJ5{i(=E&8;vvbc{$vYXS)ud?auiXLa3Iqk9_Vc}TiqTiWY`pSYIUD1tObUQSvL%Y$~V_rH{uK!ZN7bp_AO$&Yf5F4g<4Bok%#l) z39tF`T~reW;aN@17Q{X2hf`)qmjcV5h2NIp?zR}6;haC6kdQoSeB0 zt9=~2Fl`LH)fIHzQeHkt`67SSsEy7z24}(YsOP<7x!JQith^)o?bjdp<^-hF+wkYr zsxAG$#{5#rVRL*gQtvUh66<;KBP_N1K|y)`td@kvK&$pwTn3H-WuFQx>V(+X`4-&C z>7B}_2z&vLI0Ey&n3U&d#d`Y%5{bMT1$Xx-KPyd^XuT|}y&_Dp<68^Sm)h*M-_MOM ztM&WiN~qo>dgK1Fx*0v{bt=<*{m> z0~aF0t{23{LF4C_UuGj-dymo@cotdblaISpcWw*jYkHe>(5+;S6vQ%rI#xwIb}7iR zP}2KQ1KZa~AGS+^TcW*`0heDjI7Jn~i`5pRQa8}3^_5qVTf}7pUA3mPwKYGVn}Ur^ zpe?hoasEp(jlbh;jsy)HG7v$3At$*E(f>+?=o^kR3$@R#EiKK;8fVOh;ZRaisT@`` zR>AG-b8BC(AG;jkWaq@iOIS5b6rGyks$RahI)lZgWn}#9s3p3ApVtXhCs5S@07H_p zl9uOAvf$l&rYTQxSs59t1kRB+GhsgiFI4atwM^xbcCCJ19)U6ij7&^z5ebbOp8&8J z@M?`RcmZ?7j39PDu%jqEPj3n0j|1JCwuJu3EVfcj!7L$)<+we5WgU;uO5>rWeAw>-`uy zK7|iVCyNI^8)9~tKt85RR*+tTAe+wDV9<+ZnPDkMz>-K^qERz&JU~w|{0sfxq{sij zG5$t0$UsKR(@A8nq<-!!(`l3bZ2w7)!(EQr@G&wV9 z>eke84<`8@y92QaAXDA$x%}TUivNNHRIh=O!uq+{FT$ql$kLi#S{!H)AVc>wW~9tN zBD{hC80^bl46M~7=vab*KK*4?jGLVeU(GVOukWR8i2t$M7&?Etc3AblU)TL_Zz7+tO zvr}SSxp|aREexdgjQ1l40?vP2@X|Ygl0FD~O^HjF_LGk&ZXKxPe0V3@yFcPZK(2x# zBJgw}BcYFd9mk^YLxM#FUq@M^YtVUs=j$Rf)OH7a3BEuY%Kb!u#~Hy?`b%3N2#Df| zZBpB9k=Mod_7jHnPd6XSiWA&-4{L8DYH`;XT3|n5F(_=>Fl$src)aAjFEE9d3_1*lwsF zZf`NWA;pl6@Cguw75{14H{d(}%0T|IB&2cjfr7`8;Ym~61V919u@jx~3Fcl0 zJoZXzFl@==vh{rbpse{vPwA2eki~>5!rhnOqu0I!4D_#v^|Pfw>c zPoy($wWXv!D#+T~52V<%{5qp76=P;y|-%AS4Io+^KCOBzjwsD|IZ!a>o`>0 z(_cG_xHr6Cd3||$W)*vX**q)^7rrF&8e(BuNw8f+p~Eo zB`&_HBqY=cD3f&e#@>OrB9d9~`-22#?62AWZ+ly9?>AbR+Qq#MG-Qn{p*5wYjL%qt z8GIUbCvrq;3B@I3iV8|4WE;i`C{88T& z82ARczxDR>Rhhsq8Drzm7-l%X=l|@VC)YnrPk+?hf1FRfL*}U{kD(RG4o(*tpv_m& zv^74nVNPJesGs>`hXu_#o1)G{G#51jOQP5ySc*V2Hai}lH#>}Q9U9h)^|t-L25o*Y zXje$)hYuz!`($RU#8GFs8z0mj;o!tgO-&ul*tq^oyHeBWUONAE$WuR75smr?#q%sz 
zwN$Ic7{})3_x}E07ik65Ob8^qe=Sn{suJrb4+4n!>_d5OxqczI{nV*lAm6^FF*5@l z@Y0JhMNTX81by z^NFmq>~giy&*`J45Gl!HAv>4cAQ1LHJg#5#LPT@A`qoP4CP`(Cx>#?wu{&;%Nu;&) z?SN}_YG!%1%H?TGaY2LIVxfZ@ zeMT*pK?E_k99@R((|wa2tY{M8FE-7^o~Vc!9u!K*?l`#;eL-TXj8)XNZ}P?tNm+qB z6X!G3IcBLzysc=ab&t0kfZAqz1Xrc8P3d2O5NL-%BV-u?m1rw8{lp!<(ItR~r{SdS z?pXlWgvn4UhsEWF9N)!vym#;d(=%KsL@&>e>Q-v3ZmULHY;QY6D7?2*-E7*fvLG*S zHo2BtTS53=;B2O|jFBfCrv7of-DuCi|qzg(rNh>)=>6;??9( zZaJx3Bc<9)soZ2+wRcrNoot5tp+~EsNPEvNHfLr-t29NbZkPYvd&kS|U4f!KyT-!l z7J>e!Eqlv6;tle3)?D9AbxFR3)}C~7IEhUCxXSxDJkAufy}Mtch|Yf}cCu&v6fgam zsFTZJLMs1<}yrQ+ue**y&3^fkkHk-9a1flMhZlJT1!gAQ|2}S8n-% zO3uI1DeA>**p-;>r7OC*Gm?D~N6#d0LkCcc8NSv{NwgS>SGckk4cINJdr`Vek6-I+ z-Iek;!kSCbcONu?ALjA*0pb`F&$Dd^BuweHPJ6a`Z_ZwQv>Nj16dOj>`_W+Bxoz_U z&38>Y88UG)z!52Bs&);y4FI3RAiv*#PL-P|GPE{5ci^%@G00-{qxjSMdgBjwKIXs= z0TIN!Fm_}SCtF3v7_fUU%-OxBi8)bNsmqZ!Qa2BlVU;ipcb`LHw(|!}b~ZJPb~{E( zKRRY2n~dmwnACfRgMoAOdO@VC6aiRTTlc2&c4k^at1<_(Is>Xoa9Dm`K6O1jqsEGf z4y}UwES+|_{PU?!LV%5L9_PcIYGR@ifrm@Al=K7poZNeNHXY=dF`~| za^$2S9PN}3W4(t#A=>(L88LuH7O*=By51LaChuNx^70am_H)mEfyf$xlFaiPb335E zmRU|60|39sNCJSA^!@?=M+@l`M$X3b9u{g~!{R;K4`Gp!8AcvCT&!BrdUsPKoxzjP zfBP-TVPIfzI8W)i@EuvWTI>E46V-9!kvjM2r(C)x8E3zmqoE25a7pTO1Cu+x3o9-T z0JV40X(y0=Z`gpbw z14eWt>=6{1#fPcQH@^GHgbowL9}W%5Y$o-#O`9(acc)#{srZNclFPT+J-)6#qDe93lxyz!cht^nW zJ2{~a7DT2sy{$N!8z`jh2D3VEFmw4pzUu86dJsD4%lRischVuqaXZKEWyGw#)|wgQ}$rnia|4l+1dH8!V=6Lc)Vf=enZ;|Ek#~~> zD`jd`$Lo|=0|T^0H{abZG#<>qPyddHu#g^yKf~V6d1h^W9GQA0a2;f@JGPd<^pilv zR;!ZDWaRRABBEOb)X?3VESSK;!a8!s%69xc(#d~IWz$U&8I}0TLp(2cs|Six)K9EY3B+Qw#W}Mv z7Hd!(;baGuI6bXUDyV#?;5E5j2h^8? 
zeg_9f4ef#fwX7!4{<`GD%#ux8dp=UbAo4D(T14;pb&~lXElI?$Wu^9owYAPSn|njQ zW~^rQYdEPX*rXV|I!3z68JX)o{)%q>=}lUR^?9gV=D~}0?hjSmP`!O~T&4SnL?&aZ zvHYV#=v*nmUZnI#*tPOZXBcm;)A%%feZI3G=)UIl?v&!iU zQ+&K(&3eOwaBaNBMRl!O<6okdpENdpqIGyxN0FXP6w1e<QAYT+nYUWaYKCcN1MMH5DZo|9BBmKo@BmJql85DRt7rz9hAMP*im&^3rpN z+i98^qe9YQEn0K*WeIO+C)ZnS1XviR=aji9gfEfpGR0)9N}ieeJF30uUT2EFCp5&f zA6`d4^WK&Vpp+#ipAJkmL&zXo`L;ayE8Y_KRRema|C{Z#*l}JS zWb?B6#15>25Oi~U7reo+kZ=NaOKhpgX0KnCRoz3db?wXqXw!23BEpqB94|UM2VJU- zV(iwxP&m`TH0YgI7+mSlD~Ja$t?>UubN_y=@I<{JSFY=1-lJWs6bEW%Zmpt*CfV?(WJ+=C+5v7*j+w2m8n|;gmVlmP1 zG-l6l+!rzUeJCD=y!n{oJ}u*ImGuur3su_$%h5#8uSr?lJt|sI$AT()lH2;z|vD$ z5@+JU^MFw{&FZ4&6Phi|$1E~7wZc?xrye!gsdR2$O}#na8uU^xYS~}bfGhjn!x-c# zo5pWrv}WM%*oS4x=B-c!Yf_7?s`aV)Q*=uMMsHTwGw&U7L_S}kXUIUaq2OWlEZy*N zLiKS=tL+)OxgFq=ECWO)u%*`Me^h8U`d1mr@;JONS0X{l*?2l;tV!oIjl1_`^t^Pd zXoetHo>GlC0c9{lG-XPMHlzGhJdTu(0*7KHP3E_`jrpJP@&82O+ebr!9}t#BI3a{IOE$ z$h-(7_0z4I+|hE=)0$8M0d=>E&!)~?uw?|2Y!B(lH|Px`P#43*ksN7J{}X`b&xEfw zi_EpJuJjC?NqG>E3b+L{yjv0MT*r!;m)aon?mhN;l5;C0ij9Lu)F1lEDhkc)6baV0 zU5c$qt0G#eq%oP#>$4m68Lt?m&gRbb$D!MPfWtFH9bXzH(E7-NGangWJa6>#GNlF zIgcSZzTXn~nu!xD@%M&dpdj{n_E_`u^qeJw^+_P${>kV{o*1e+9{OfVd|>*i&9P$( zcCZ2;UyB*ZRMY-b(~(8u@X+w?JLdlIG``$*9H|b1A`5-l-W(ns>0@47E4TlESV^SUIuXEir6l{ZbR3akX^r}k60n{SP(zOLQjEL08 zA1N}8!2Lgh&KT|j9NNIqlB=@gzWKpiB93dyb;ti8n0-czCJ2ftQ9OjkBr%EEpz$P4 zC+#GuXePBVUgLx>tVk$sAh{5T{A>2(<)a7`(+err6Q0KD>aOR`-S?NU@Mi&TgsB8u z;6@ORrOKPxytld7?}wPh#w&qVp!XJk{lSSB;zigxEuf?+@DJqdaliw6LQPI7;LVh_ z%!A=@w98Bjr!X6$pT=ox=3FF_-)$Bij;+I$_>xP-xE-1XU*mP3^G)6x9X2_~j%JYJ zl9c3%$duA_^IV9uMk0z^#xs?XNuGfM1cD3&`IWJTpo1kfQ*&X1tdx!U?itjR^t18~ zJkEg=++BxS&I?&7q_|aax>?#(rH`Aid@W=N%N<9$zxr#>||LQkR3888A!bu{j|mXU= zbd^fLuJ9uHwcDcEh2ZZ}3>NY5m z&px?Q<4{S{XCy;reIHbGTW~Y&-wN10#NQ|8Ufh31AVfl_si){4urC%MYG;J0umBlhc>9(JyfF3p5>19G$KE84;!-5OO_oJ?YIi7G%CN|;cr?1#;fA%gktDT!4 z-C*cOMQNn%c)BZ`M=LHV^wxb<0L(oqSBld?ys~EBLipF*NY>)#ELET1tJlV3!=jG8 zQhi>(+Zg}I-PpR6^5OVvq;fDJphaz63#*b=w{y3W+!TO&N?l$()?q!uslbVB}3Hv4mHMn>$cCm 
z-EnyqI9*?3Nb@K=7MN_xHOL*|wg;tgt8+?K4PT8`3;#d%-a4qRZR;Kk?iNUJcS&&f z;7%Yx65I*F0tC0<~ zwbz<+%rVDskw3MYnoJ{db0j}ZsgQ`$lX*mDHiM^@t`S!HcAL>n0*h$;i7z+%{d6gT zaAc8h*4FrxW36@>-s33}T^#Aj2GmxtPP<)R?-c~R;Rd5L$lpuJ7L}ai&36WiC*1uf z5nO!4NWbiAOln<=Eh!vY<{Uq9W2Fr2YakqY_K?hJR0q2wstgwBK$du-rp0=ZjZaX; z0d5DwCtef$n=h0HT?8frAEr1C_AbOF(vbpe%(+ai_1{UUe*rTb9dXgy9m1_a{#|)o?0N7R_h8z z`aOpW&3SBNv{tDjd``2E+!^$r#3^PZlPim>Un-9>Le0C$-%EO}%0k>|Gdl9Vgm<$}4a z@}b#R{iDtE&ByJgx61S1N_Q!iGsNNk7;`bC{X0FirkodyU9Tws(E}j6_=n=+&+;ZC z$LIjfttYRQ13_N&TemAg01prEf_On-xqWBLhF4+%zZMG*B^y4F+1Nood3cRnBi5(; z`?u%jMI-4L{g+|*Eu!2(%XX*4oB-sRYxUko`l*QcE@`mzXVjGueh%QS6y6O{RXObQBYC1 z*J#I4xc?yD|Fib~`^N@--@nD+uRSe+)s`{f=O#Dx6C*9rjDkp7qhuUuIQPoVV)iqTaYq~ZP-JC&Pr1&XjP86W%w{wUKMjlJ zJN$mWabA>SAdzkp9}g5^H&-{>&+>Emv=U2`i|TD(+K0S^d2b2(>C?nEfSH~+5we@z zfv$^Ii&?`HwUAE1y0E;e9b3tP0slvebf7<1>m^!2p&sMvdc9+}lv^+a0PJE&?bZqx zimj-bI!UQ-0rry)->B-y;q-QY!KgpW%)2KbSNU_C{%-*M-x{7%fxz(=d6K=Bh#o-I z0%MMcqgJ2XN!}ZD;Df0Uv&(v$MIJbq`4^px%8#GCokMyoVQtBR*K5m{<7A(aK7!*SU9mbFvL*qM`_Enh9;b9xfP6W==O@4jHb@yviF~f^ws|$pk{2wtsDrr z5f7G&!q0HXIZp9ejn(rngmxFECTUM^OJKr3Mv@5nqGcc`comnIH)TmFXn(f+W7z$F ziF2?2oG#~7NJw~X&|aMv{6DA+B4^SSyh4FG5LOPN z4<~?vF-W*vZX+|KPb;sH&`EjLfL5q5v`0Pb?uIw5{yjGwxg;2&y6YFczs7I(NavCW z#0-m!D5%Ia~jDt?&QM;(S$whi&=_-)gIBb1btrUTt{p9xU za@ZPm->;^}A{T)je2MaLe*s}PMbOsE$Vg39^$1Aj_Qt842S-FWW%K?ASlB;F0O&+~ z&Ifm8m95n-gOi}e-6tjxmc-J@QS`G8(`q%hm$m)m&Fj}s1mu*I0=C1m*s4oP`r)7Z$>EB3}sb+AO#nT;qTL ztm}MxVJ(3I2G&3neCz93*#X=O12Hi*B{WBQ6aWMlT6dBO9P;`9U|IZc*&La6a$3LN zHksRiBBQUbk8Dij@L<*QyEI^BXSLl2qLX57r*OFrg|W826H#brX(3@`WwkAHSqGCC zK)`n7cP9#Y^8Yrr5S*Y)TnfZ3<-(1@uttBfwcKxxip#+7cWG*BO49S15v3R}$Ntz( zBxEv6Y}+<6S-J|EVfPo!TeVDzn1C9_wHBwY2Iu!_229+((a`{-)gKuJ2d=40S!k;{ zzFnW?pABP_SbRPnz_8aFdPd&>HH&lBYdEFKSF9LCV+sY6(yZZ16+;C;q6!RGS1zau z$7~U`15RX&|4?~VtmJQQWO3cD&?O1R>p1V%4i!aa4xsvZjxT930~n`fj$lT6VDoOE zrRg!&$kucn7pR@SGcnYE<|}2z@*< z=LgyKmw0I-=`sa=B3qVVw%^14Hlv?z)GCsmqL#K#>?DyF%updV9I3Uw%*>&eI>i~v zqb|~L-(j?1;MF5-R`TK1sz5WL*-BCU`KsWYjok!gWBQ3q{4U^+uA$c?q)G%*mW6)z 
z%qCoGp)9IzQ`>E&BnA`O1HBqkw~q9f(v9#gC;E)zd*Ok6sI1oITwd=GTppj0?C{)6 z5&hmo7oK4x$Bd$}ws~w0y>=MAnjvJf8!u|VE^ll!9Zt5@cfVIU)$W?Ts5(zbOKZj< zC&O_)=#p~Mada4CWnfcwI`k>3S5Z<@y0pn#|6YA8thsETdf@r=d6U!a@@3TCQpA^e zf#C}<&|rEw-HnSvFg7sY;qud}zQ9ZKGaYTwtnhiXBqBUwt>1>kq*n8YqJn~`!$HfU zI+h#_t8CJO&^xO=ve=8C)?2<3LF!YJzcSsBshtlOlhTs5-*kf^T0nQ%N_}tma)Q=5 z3XDEB(v9Y^JX`Dy)6&prdss32F;r1pyijLaW>o{8@che}A3vJu^t7}b&r{pya04_% zKo!Pzh+j_IZ2#>$=rcXTkz)Z-2}VTE_nTU$&-A#NtzI|F*x1~BfQ)55R2k960Ercs zT?m=R7%#d0GQPfBz;>qg?kakvUVU3b!^Z0S=6C0rt+(VS5FxSkG?9DqEq4k*LG#`4 z6s%fA3qjUjlsP$AjgP;+U&NeoO-`sXf9T9;z5rw>WMuSIVcX8`a^Kz5)PirCA{(`} zFP9^?m0gl;;T-0Xu!wPRiGOR9dF%{KSPSahWxZ5WJDGgKY`f3`@vz^D7j!;ELH=+| zWoBv`MI<uQ!M-+ zKE+SW8Y{ZI_Q{ z*HsP{8q>0Jfwt+NyW*-G0}z`2lLH8dZ&5%XY3qlvxPY0pBf-=Bshm(6-#-X$szhwm zq~jx>0>Zqnch-F}z%aMfSC)s3AMKqwQ{zuKzh5prM4xMelXl8^$@2jmukVMZEoXNL zP{_9a0Q-Kw-4Qw-kgNU2$CUAP)xNG9s^lY8!fGC;zK)i%U-boF}+xNe@+(QW#Oe9RZ)d z3G5?&ny9V@6o}1; zLmFk8&j|2G8s81&n^EMy4N>Lh>U^JV48ynD(>V+C+L%2Cjqm0!v9f@@_-8c!lt27` z|Kp+<+!|dIv5sT~U0vORtdf$I#Z{UqD^F93wzha@Clm^`x3|X%X%(*U8aVXeGFW05 z-+j8a-(4k^;J#vLQ8?4$aPr$#hupp?&i@2h!Qv&WCfIs-lArFjp z2nl@>2nq~Hh>zcSYW+M|x|g12q2c(lbrDFlxs;%RtpFKWR!**(Xj)6b38>Km>F(py zq|D6K4-;HWbaZ|41+T&jQbR)IHpsCFk~3orUkjk9{#3EXYVZDBkbNGN?WxyUnF_&L z36Jp^^d8CUwt2Z7NErdv>CK*POQYGy&zqZreZ3I;*lw9fWuWu*{j#>TrNvrbR28kf zd%hnZOZVM+%X3H1jPiF{E>{bUr}T=+b*A6G-+kg4?YF1_`ux+DoWLI1>u7xSLN7Uk zrI?dmL0MVv>-*~-xwxcCtwF1*Ut!N3;xd{)Ht&DW=B!`TsZUOx^+UokHQgqN0qN2L z!C8#!DL$Kbbw(xuW3vnev*7LcD)m+gYHP>h;yN%PRLbDF2U28VYb@*UV`e5J@U7f_KLN)R?Pt-X*W=gdh4Ikc6CWX|_4ulSaGaNpk4bc9OnlMp6WD zioi!oF8;x{r|8oh^Y)n4`+99tQ_uZ;hIjUf>DI8eDlD@$Pe%ru{p2mV$IdFmGJWmm zNG4~}qMDxD^=qb%r3Odq^>R+%@bK_Xzo3_A%#|(BpL@3FD`Z>|1zNUFvA!Pn4m^(%EGr<2&&I=!N#I~x`CJ)jATKsTno*EeYDJ!0JFX< zd!Rh_4rJA~t>>fPD(k-9{K$fS1l2|hA~8xj*2>e6XJ#J{myDmTnp&=Q^#bq9n8{s` z41{lo#Q5-ZwcVVP8`Nd6U4~m(^VTH%{o&TKqado95 zjdcfC*1YtnK`l$rRs{cMuqgATrpNbv{;#(9_^f(vH!0>>kYkc}5UwBB>7deM%&INnHOUnzE4lrI2i3^964gOrFd$&25o&N5HX@Te7MuB 
zzg>ir4oh|mzyC-=ES@RotS6NlBnpL$MTy+>sH=#cUaeP*NncR2O**$f!n#}bx^O2$ zzDs-iB~qc?W-;))`+A8vA7Bp4trY!I&n+waR6_XUzPdk)15?aK-J$k%GbX7(S7fdq zA_gEy*9pvIc)7W`9gFeJd@S-gIT->az{8HQ${m-UP_@xa>B01_pe0&fACOWZWb}d# zu(oFj2YJEE5^`R=wPAvqYUnTfgp8`ITqd(SBogUYO^HR^-(ayYu=KY?hV}#$$Q3e=h5WoY2$Hw`_ z8Uq1p@gNv2K<{pYJElV#_IHsW;CSEAXrY)5R{4z69&akOFB7OU504kn2nkaXbIX=o zPOZli(cqX>C?8RyZa;~9SBxrA=b}}i^CcDi8Xw)3PX`b6wK?zm*{fM&6_?wqqfba* zpx?H=H$|Ks91g3hHBi{j4fB5&TouqwBwFL**=AeO$^c_+C88MNn=dj*T2iw-^a;wD<7o&d^vTtSq5fzhRn}*^*d#m`kctb(iP|Du9=W>==>5{6I)g^wXzLBQslO1Y9l*tf3Z#EiKKI zqbpjbvnuh#{eq z+^s3Fo)T;rB3A+r5c-VG;gt6C_|INg3x@a9R8)pJwPquTjtu$Txa&u&)Lvei;pwaN zKx&Y=t{&_mCrd|RRHcD!tAb%8vtMAe|XPkz>Var0n)DAdtX zJbItiL&JN%v=IBjHZ*k>Epb+LTFVG#iYsyUabBNRE@Cy^ONsY*%%6OGap2GTi}lB78}c@>O2e z{XRY%k{$vEhnUre5AEuTiVcH$^72geo%6OTJfi3A$iY6y!5zN1E)RYP_&P2)%kr2) zB_c7fp(DSX7Y2X86X8)qu&r7J%rGjQ=pO3C9rax^Alk3q9R{hsdesxesrOw-&V2Fa z+~3kQi;fUaMaB3e-n(z)0{TKKUgCxr{{F+_$V4Hb|b#Kg7Hh zr*eOOg$pkHI_na%M@0{{C9|}iIhE(*uTbHl2UldbmRk2%%vQ(!vpXd@%?iDUI&NMf zed9(9mwTc6GfwR4F@ffGddP4xd)y^mTNg1bJR&;Z!wZp5GE)WHHz#i2cq5}S!>g;Q z`$`B~(qxQEnVFEoeHwo|?(gj}x0%=9vU+LALtHm?d+0q*y}!KIa6dpDMfvoaHsiMe zA8-5hvv-I%fzp$!a^7ICLbd_JYZvqi7cNJHaGSgWHn_{OJ!r6)rK~}A_&Mq0!7Ojv zKCz0YPbh33xF3*mY#6Sag(Isw-$OH*kuXm@ba-|?-$5@gk6J@q=&7v1%>hR-{1{v4 z0!@v_^#^~T3U=8$*dpSFE-Y{(1U-vk7!%ri4{LVUck~MoE(Q9g3Ep3Y@qFCpYm4T1 z7W-gFMz~f|p6z;0o@s)|WeY<>O~>5S^j=jln$bpr>w1xXjHisKzyVpB0}5S!jPHeC_o;-0p4 zN9D(DU9Mv0gXZ+p%Lp!ahCB7XLrxuv?SZE`D(Sb zDf?`v4)$bv8xpWLA%7ioR})AIp7VEEmAY<$Y`x9Xn$&JFVRVA#%h?tc6u3Wr-ip8^ ztd}kvw#w-?OpuBmSnb&7H1MV=*0G^~$4dCCsJ|b+M+466dQngJc{k_i-MOtz_mr*k z@>VP*V>sbyAU!IMZirsz8!X9rw^&U4;|Uq^qO5Mhd*DAN;j?xI;k@g!sZXgi_+HM> zyZnLqP5nz{KL1GCE>`iw^nhL1h;LKuJ{@P=zUsionJRNJ^V?yEj2zxoZm!WMy}MY< zf;=M8(|rfvg3IN0M$q;Ro5kc{etFsRutQwam7l$Y-HI#daDi}7MaU#A{o&;2UV}$` z*ytl1RYB)oBme6v!*0d^q-C0}yBEdgw)=;&6buohm#o659(vxMT-NdfgkKckCB@(+ zJ7OMj-`!(DW4?xmW;AkKQOQa8OdKDH{E~JsOK{wIb;O}?T4$ydAN1^+mh|V^7potW 
zik$OghtBlrb{yRpS9jAztkuR_z?Z$UeN|I&V<+GfXWv7S=q)E%@PM3S>oLmy?w;uN0>9UtOIspGKb|d8As>AG zlqS7hKpE5+uZv2ozug(*M+3-1QOeB#T6eL9^r%{ zQK!m85Zw>tMcE4;+g0yAbx)BppUkg^Ez16;#B8jz{LX~K;Y1WTxP9M6mle3e^-%U| z(7hIOxFIwjP38VZ^I@;jw)rO23J!$>5yK|(_}V0G_1YvM%CkEtN-V_a#9X;2TG1^E zWT0LA_ek)Q^iZVI%9azR{x1L3P#7c@AuE_XuTXIM_#mJ8JhF9|E^cqVkHiwu>c|bH zmTnnPT^N00i=}pOALK|2!Dbo49|SSyIqJ>1Ok$WM0nY};K`OV^wIa`ljWF~t6kMs4 znrEF?LZp_do}*dHx4@%p1BL$Lv@5~Y!H|eHP$%J+zB6|dEh)U>XKr|c4^G{cHfy^M z2>PB*c*5+qEg{ZI6CQy=Cl=W32FWRj>DP z3c?sah79T1Z;E|dx^__?C~PA|TGp*lRO^fB8UQCSNmEOzq%$yeXE8Lw+6ya|kO ztH_5$1r-F4P3xDv=-sY{UY98ah5k%ni_&fWh#*FpMlER9jbl?)SIb z4XTJW6AxjFJAvaAwI5UJ6T(^Y0!|TC#g7>ahjl(LMWw(_BL+`JD|Y^^U*-*OWv00w zmRenVCha8GZFKwVN$oH1p2Q9OHgNY}yUeLMV=2qb?@mKQOPwk@5fPh`#(n;6f1?lY*3qq({X-*cO@{k?ZR@Xn5n1&EV(@^uc4x! zTD6CN!fh_k{HSkS_3dDO)Qwg0*x`eNc2NRrEH0fQhb>instrHPT}EF2t<7A?Hj<9r zs)PjDmXfcU7A+|glE&iZGRo{<7saHrkD8%-U~E>M57Dovi$De`%NcaHl!NYSw@!A z5MAd*40v;xpuU(&Au{sX&NWznUX_p~$QJCT)AgM8q#+YAJcb*Q-b^PCCvO|CXOpOS z4@_Xo;&pSg5ER~V2`mmYQxe7as8VISzm@C`*e6t%Mg=wvNB*aNNZv4iT!JaR+#Ul*i zt3^Ev+xR!9$=%eCubiEh=*D+O1rOa;16G%w8{}^2I3Ck#4Lpj5>(m@@E_4Whi$GQ| zI?i?tiYp5u_uN1ER(Zzq^LMTSgR?^j^Ck|7?24Kkv!%Nvmrz~BU5234Jp6lvPrk&M z^=9eo@v9MQXLt?vNDWvRx$TA1(aU@xi*Z*2S(m!Rj1TN#(^KB}#1g|-B75hbTo{mF z<>AW*9X0KF_lml?c|_2DO!YmTko1eBy=)X6&oG1v-TIpx9Y;n|wQ^UID|cQ~4L;&p zYt2J}%`oZ0A|ig^pLPVO!?NhE1szOb+?czsw=Py;tn@GO=};nAMd3~~SXRj`4fB0C zQP%t8O>{!G*D%!Bu4B922d*9=7t81`B@EW`Yita_Nm8WTO9-%1(npR7n)lkBwtQ4_I>HqkBQCtaG|aj|nv2n%FX3)~=y^#U zzz&ztzMI`^Nc3?dV6w*3UjJ_5>M?y^{_EibE0*UEadb84VfZ>9uM?neQjN7k_J3Mc zUP7z=37_!|!QnXXOXUDrd0Ya{O*v+d_H}*$oe}RmZb&C0?<1YiJsXnZ!oSYqw&K~7 z$Nq47;6Bx=dwqRd2yx%e7}RQr!B$XLgYxS>F?J`EItQ+*P3cK`gw|Xfk(ylJ4q9}1{>kA6bf9}~BP*L<(*9E8dFgzaG zJAX_}L|j4-A&Q}hqt3YK=CmLYo(MFNBkAp%P7KZp7hjhM;TL?Ede~p~enc%G9LDoI z4Wq>@f6^sImgYJT^vi2SMas@tQwHVjkee z^64Stb1QJTtotg$_nM(9ZxzFb`0vL&cT0+}?pRtm5d_F&pAK;MFuCgr)`VuvXtsJ! 
zfjRurGB0`C2!31=QM2>WlaHfMGURd#M}mnjPEb!l8nAhuXixhdx0-XOG|MH6L;S46 z&yO&DQ{U%{12`MIJO;{7xj`#>3U1q5T5Vj;40CLMe;-#_C|w_Y z9mlQ<3v$=DhDW4_9ECpR``uv=2>8%}hmXt7-)uf8yTIWZu^oM99{6rOkF0KjU~&V; zX+CnXQq@UJ6-q8q+xkQLuvny3{QhHqhV#oO$w3)4b`#ot1Ms30t}o9q$b@>_bw>sU zOqQPY-MTEp-Z&m(X>1A01wDt)K=Q{k%g*JPJB||dOB#H;^FAAEyls4eA`^@60XCBx zt5U&>U{i7h>B$Lbw`rFQrNab7sN2145N#DDvMK!n;WSIOd4_qp`0$8*P|+ehFfJ); zEc$J#OQG_I%1wfl;H74FUWL%#O8k}xNVU4Vcx z;Ol;}ag_Vk+&210o-0{ir=QKY@)=1F0N2U}rtwbuwku5|k3tkPIRXf1V4W;@CpuN3 zgvwtWAs-@54{=rQQc_%tmCLN&89;|Q$8fgfd*X9Ili=u^zFrW46dsqUio87429_%H zsMO|^0_o4P?K84!<;es_Nmd_1)@rv5bfwtX@@LaY6kW6aq~V9@^+-{OChyX4zs1%F z+e5OZKO$FZC<#LtJ;b1>zB?V|tW#O2njGUElqc@%O1v`FO;Ytiyw1|PBk98Cfa9^X ze39``gN~%N6Ct_R!F*sHsLh@oXhTVxAz;0nv{P2uIJ!V{Hn-Cqj$rka!NOPT(cpfBPQq2a2z+ zW_#nW`W952ifv&Ehwb*UJqbLW%g>?M1TMX*g3`DP4;|#LD7k?f#9X4=Bkt{G-M6_J zNa;ydnVz+uI4kFcuEMLb2QtENE8r>_AYD>nE`o(KU`#iHK%5wJkN3R+{%e?**spxb z^`-%*Uyp7~ba!VR6&1=qTf|u=F}@}z0N_fC9CSW`n`Gmcwk_V$uW|}E({Pwj#iQ<( z`4i*=2XOc9pLW@8k7gi8`6uMQae?GD+^j2)tU?)HU{x}$J0LjodlE^o()Ud5b!Crr5bn=_SAj~}|uwJZ48Vx38Dr_~8B zCbXr_Bu)+SsS$wPwP3E+Mt(~Rt$lCf!Pcjx^V?=PEM#L6WPQj~lAzG32S*!Jlb54w zw4u5Xof>1}*af};DOtvC_9zCY6+R&AbJD)lz?yDU{Ws20p}hqMBsT6K6sDc@R>x^8}-1nM~lK*B^0x; zw3a%{cE-8d_Vykd8lqbW8y`3Q<~}(m7MkDuST;*8t3_c;7cEycxE6Jxu2=wgc}kSH zuSeK3GL}HYU6BH9qbZohgfAD+Br6g6D5+GQX;u07x@S1Y(2a7M4R45Tl`}g>j#Zll zs#U#v{q}8hebT(kx5Q`N!R_SNYrq4l)lj1k#)94IJ9d_0Ny>{xqyjq9^S2tj{eFFP+RjDl}e@AP92FH`(w zwLG=&r}`oScOJ6mbvlJ_P=zQ4R$xS+-FXIS_*5c!w*?Ah9sMw>z?kHF_4;+S?-)q220j9F5E&U6&7gtjuv8Skzg~)E z7Ze20MYVtbu3=?=aj^JSQ&WanTVLOnBsDF~l1eGeQdm;@kCpyMu>k4?J}vk{;P+%X znGcyN%A3)eQk0eLSpDsO{q0+~Z|1M&!!|*;vz_#Gk}_|s@bsqER$Zc(SmO}vKcAPV z$PVDkwd8gCtA5E1X#e!OJ?%*cR>eLXz{LFaovrX<}&D`ceJ<$LBQgr6~7hy5k| zlES_}^xUnlpzS?9J);4e%^Ul3wQ~<^VXTEkMeCqod3dT|W@cvokf{wkx*?BPwQKyx zGK3YxWkUXZl@4)wY+_=;4;!+nEt}tfCn&=k>GdP%?O|Fi_0~%JyO~nI1i~{CHTV1T zA5X3ws-l@l|M}^IiC|>~SPvfnH|NE+SVmV@SG94!a)!WpL6ROvEGB~O`}Zad8s^(% zoU1I<(&5RX-fdsH7uHRjvj6rJks!lRL&Ax;Q5OqOkxhS!$CHT2jr+UCVy<#=b8{nA 
zhe%GY#vyUU&}6_h#tUft0UQ32M(4&piXH6>@KKdFepu`zn-3>a+-5j}xnCLf1v>Y;u3Erx~U019sI1n5z@b}ceY*Br5L zue7Bl8OADK%4<_|KI=Ww!z!p3mv9rI^|U#B>lJZ|otejR7(D zE4Ihcj*1d}4|VE=O6A4HvI+|8>$0w{jY*i8b%B7BmKlIy%oTS!9?0=EtS82iZ+uyD zZEk!#Iw3*oTUuILSdl2s*J?JSR31EX+tMtbsDIgN&MZNQ(er`A8UA8#02)5xno07D zpcDsH-4ypNE{g37j=Qb^upoX6=1FAi`x&s5BMx*7DqUs_&1Y%urLnt1e#{yu(f zZ4DX9UVCi!v1uqhBa%JyZsM>vCB0myF*YlIkX*$7*hN?40#KT!{0XX);$l^9TA{TT zZ~qo?On;vkM~zB7Ve?UvpLrcLgW*BBgt;&U0Vg!1ukeH-Y$VkA%!UcL$lIb;8r!+& zf27+7qpTZJnZD`nV0aekwjRNIH7u@B52Wwa@=yvBO#DPKb0kSJ)6@5D;rpY=5Ank+ z>f0_yr>Um-t2Vwe`Mn3ub6ZYprwzGv(&^=Bs5bgMI=ey~M48b^9YE8q;*Pj%> z&aoZr7e5ihkHOoi`M@Hq(p2IxvQdmBa@FBT3h7@=0$yeBxMA6S;fw{RkMHV*GA-qW$jo)VFt~)Gp*D9rd_A+2_ zf`dUUcz`;V&7wfAoTW`mw=C}w?Yp0=z3~k>NBoMdn=_(Zd#l6yx%FRn0I)znV`z$O`F4l=q+OHbCQfo?ydFKt zamPG&(8Ye;kvm3X=O&dc(}tEbo*dC*RRtTfRn_Z6<~`O^x|B1K5U?tZ_%RHQ|wux&3+YM*7JUjW;MBD1Scv%ktdW`~C8tu+v6(Mb@l$u2M|3l%m*mCUdNa zw8qoz)g0+Yesq(`3Yhq0axzk|BS5ccn(R8;P`t0E2Q(FwN9tcDYMT$^FTOuqx72TE z82Yp4DrQTulH`c3W?>)floLYZ^~5!34ArFOt(v~@VCo?JA0Gcb(W)O}R*G}Em4eVMw>R&F^cjxy@CT9m``NvEhr-^t9oJ49%?;0mM z4$na`&R;EMbi7AU{X3}3Lm5|^a7v5e>Ac?n!mj^D@S1Uj>2ixRcYIAc#^_xG>uhYDJ)B zG{pO+Xy};yV7bs%@sZG8Q?B?#@0ZU`2bVwotU!E_`5q(73iM=8@=a=REV9==O&cGF zi*Z!dwQxWYINuIp#qY^Zg~U`llezSw4|@!-Np##Hy{YF}l#>xAWREI|2U&z!x>rF^6@6ChL?|XtHymAR zj96sYExr*e1UO3YB)^zru~$atw<>XW;Bdgh6?Bj__~N*KgZt+K^3B8n*DKjbvBegR zI*8!f?;lPU45Xj!(Zh`}NpNu$=q%3(;-nnDYfr997UG%s!Dz53u8=GA@^nY11f}py zhbx&?$i+#-u^39#_3D-VT~WT;EXvdH>*|Ow)~qqztJHMLSDN^Y)}~xdu-pxWYLdKQ z!Vcl=Ne{S)~6^WWUSN4OJ5tz%&$g*qPCJv;SP5eUtU z+|W!v`-_i+L8FY=LfyHq{1@2RLZ{1RaJWYGH1yAvM-WdiTQxNHD2V#^N>Xd9k{BHo zYZPzSr7F@|Z}k6uMo0TtR=`$ZJu?Un_&R7sHS)0|g#|%vg#|r1H$*Zn=NYFAg0$?a zL={Gs>|F`Y#L%ljX3g>qZ<8!BL%Gn9{aFQBtR~z+M3OW0zpS1=vaP>)zuTbX>4pHpJ4pl4t0A3+&$D{M`a|n2LEI; z{0oL8c1VA;^LRgd_DoN$qM`yF-Vdce_Juj7p1{V` z;TE~EX@cW`#P*cy_DtZvyXRtDILYkBefaHHz098*V0%{pg=4Bn^d6%BaPz}U8!l(1 zx}WTJrqU}Q7>D61FG?hIQPe9GIV(pZNY4#Y&`;ukE{{We7akEI{Ht!NrO^@lHJQgz 
z=kf#e5;{!~!EH92EbfPx;c+tf`@fFqH(gJ+T-8FB=jJ0{O^VaxgySW?d?`cbx68aal1VvebM)Sufv17}^X}Jq8^PQCTJIx2a<<@oaD|MgqIokb zfZpaXrNpc2aoihh{QH_25xi~Nt_ue_30*c~WMc{KN zByuo8+ZPdMyVNqL?o=L&`G%y3ZY#7~Sn}_s-d`#jP;&>1z)JTRpk^d6YpMwt2_;ir z@8m_9(K1$61rrpde?S$#})yRq@@!G5N#s@88kCnM_Ka-(G&WUj|m(dSex*(vq>5FOUpn zP5j~?w!NW29h7w}ULT&?mQrtXhc-QXBw@W9R-&czLOD zSG#>5&h;OpG#-8}LnCUtcLEswkTWhhTg-#5U;mMU^-0v*ZQP#ykwS6 zBnX4rjl!@Rk8cfSRRB0M<`1r|6w6t#c)j*#$*HDDCOF;&&okQKV=Uu7#TajRc6ch( zQ6^1zzG@eNM}#g;pjlHhqOYq<8iI={IXyDwb-}ry7!s`X0y#Gvej|HhMS|fK@NCSs z??k}&3F~3kc>NkjAVB9d>)|?UgD^~gauv2hs~Xl6ksRi~x6^}1 z<)We^G@B%Tcy2o{D^RSq!~?D@Mj{Pv^%`eZz8R|2z{gnYkj{}&%X2g)h4Gw_I@>rHg> z-Af5QjXBWS(l*a9rE5aDQ!yKerH8omQi^VToUJkprYz>VTyVf*LWN7Rqv+{{U$rYo zN$Y&EB#U(YP4;!3bOcpsAft_{?_DkPEhk_}hD!jg&RnT5WZ^Yi016Zo6qxvM{#8dN_T<7%#!}+nCHNFK$io?e_~$;ZACfijO*`(M z0jzk?t2&R9;C1KANke#CnP++}4V(;Aiotad`ANe}6Zp8IEU%_hUoB*%RWVz)RUZ{e z2S~OCWf$AWkSQFc9yZ!8mszt&qV{(^cJx$w$vb^LgV>sWeNqHi#9RCRH#!a@+$1kt z3Du>?F+X6t-&2JH!gm~Hx8hS14xePR$Jd7sP+Mw9_60f#U$hB|D{4H~(Uhz}_$n3m z9V41wBdo{`??JwW3aK|2QrSqRP{J|73n2I zCgYm>#Y3+jtPt!(zadE*;Mo--?}tT5Ammcztj0sZ2~hsqgXY5^(L2h1j&*ist=6+i z1wDT?b2+4NfYLIpiYY?xdT>>uVQ9F&59a#Fkl6djI1)7V<_m0$w+AObv@Cs%aU3An zol@~S!F|3=o1_D4J|_P6!t{@m{U4w6rJMoS-C5>)d`NSxGH-r}Ceq26h|wnV*;AQf z#W-q(qul$hD#((D8yn3Ny*C~O>(Sxi{?cKk8%y;4rpJU7zWoZw5v&)9WAcs7Y|Uoz zXiwB6{5ubl9A|s$cq~{IRfo0Mo{|Kq>FH%U2hU{>_}^1>v4sKL*8e^_xX8jO(GmGI z6{EZWpw4`#gxRbe~v9W9+C4FG-WlP2}LT7CQV{c#b*T80Lq^Ks5J z|7r4lpi7!@qzD~A4xknFOQFYrDk;fiIFHtDh;L=g9#Xg5RW2wUdQ6wf6N|~o&>Y)aAYTSgtt&eNMe;J<6oKDl99_Rgfi9onf^UZFV z`0;|nSW3LHkr7{_y&eOn13zSaSbN%q#_Cch z=gU~05}l%J=9m!UI6TfaDys~=+ZI)iCV#lz;`Ll}`Qz@2)?{~b}&dm4Q2T!eyWS1SwRByJfj?>KR zVr&K;Fp3ID*f8(CyhLz+X4l)T9C}>}nk5^uJoam^I`XD)>-VOl0?#isM|WPbhuQ9Q ziDIAw$WS7y zzzBZ3DqK)j2_Ey5a~U+~hN(yFc%L#Ch4;vV78L=QEAxpdPMcB|jF8YuM|&20M6bL% zC=MfsL>CUbo@+-z6hj{K_21;vV(__v9pgC?C=}Q0+aB{=S}V|#@kI!N9t*#R0# zY1<(jr+{2Z;xzqo|HbW#ZbSS|q01$A)}C+?MCm{s%I7geghwhrx|@s@TZXIIXv$2k 
zz&s0yh(`l`XEhliB>TvY>M-9zTnB$Leg;wDoB<_(7Wpamb+^42DD7j*L6vaku1g@Mp?%f z8(uBuRmAW1p;PW09=7^kUQ$+eTD0Jr)+3jg5+r!o)y4Kav}2atR={YQM!siXIQ^3W zap4YSLjWYe9*qTG3g)FHfq|(~dLFVIM~;Z(EnAwZ0V3t7a!Cq*iY_QIk8_=?;tb{c z?_OAl82APbKC*NxpvW@V(1J}oqUpvy70fRvh*4h2Uy&N_9MxlG{5xR!mp<c=E|h0b*)CHfkRNsV|r5T6}I2^-LKy11jv$>dRG6>TMCHS zUGI8%W+w2u_2|M)zr%bVxGYxB2>#5@?kHYaT<%E{gfDLb(%pItPj?=xmD>a<+QGe| ze4!CX(rohO1)RJ}aC;y9;s*NYkziYh+nF1+-E{IPdL_yEd^)%`)Htcj*5B<67l;K= z@6EuqsHsj3X=454Zhm#zyk&CJ_{b>akH}9qd$h?nntfdTVSgkMl90^!*ln4*+kNq? zq$9D|*{99=R$Ut`PV#=Ny-vn0xyzr^C<*C5J4=Fl z;qjC&%<($f!$FG1>7}2uM;pVa|Ej%`Bf1`&+3QcCZZocknwnzIZb>^F(3fk!=0~>`4_s1TZ2g@725^fbaW=Z zJVNcR8U-8KL3?8&6NTN7HCn|hG4LGqf@#<26Rtz{M6Dn$zOGQKzb&&^JX84m-I7W9UY}2tk#psl(r%8%+GI_Z%5jRZO0y(o`l&0AaKBlpLH>qCL%CXDiQHbjax#$+@2i|+nWu~^f{uIW`IF6L^+zvvDKsRK02_*aE_cCiLX+q09)#@yt z_P|*t+_Isry6Z!yf;l=HZVzfoDqSUMcxpse-p&f&R8YuW?JoDrgV@XrHs}GI-hrc_cOh)r z>A)bY+C_ejP0}pvTY*$^LQzvcx_j}F^D(&XTVFOG!YrpQ%Okh6;>w%*(TNeXc1z|q znJkV61`);PlI8)xdg5iDl;P7?otupic6VrQt%|T(5s|)ihMesm6~1sv1y^X5if^a0 zIvp2$%=fSiVQ$xo%?ZmcYONHesIX#`Pl&H<;ou5CVgWYQQ(=eomS?2$eE*Yeui)AK zrzZ8ZN`v7_Ez3%JQoL3xBfEyP24^R9(D;ocMkl!bN@g0H!+}9UwF`9DTGEV-xnKe> z!RB4qQh1ipd<$z%e%mftW3n}UufNgRq|x`-?=^q=lZhz`>IVCX%1vB!93#`K&Q*I^ z+8y$aKL|Q%m?}&b_i=cnjr*?`{%i@T)XG<6cA3@lEP~Qr=${27rn|M!R}Q%fHVTrMY0c9xpm24K z2IukBFR^dV6a=E7{J;nBF)GLd~87TGuk!pKOmU>=j>K0 z1{{AEorZU#bu%n&fR!re3%ESW7QAjf)3Er@gIfItXk!PupaHJgaBbqZt9)ipfnL8A zBO4nTkHz55TqEdv^s_pRDG~dC_r_H|cKQf$th@sDPdf})`VxKGD+2Iflv<3)`aY8f zxEo-)j->uQy@d_n+c&=t3WL3w1$XVAv(Mo6Y8d~P1#@rYD2-Sn3>|t6E|;xP4(mxd ziaWj7?}b~SXJ-F?IYv&oWf2%G5qK$n|3pJ-=|=PKN!6Ugm;eX3GnBS8YyUd?%OIV} z%sGmgagJ)5HCSNVIt93#c9KDQ1<$?yTugx#9Q@4>54d&{{LO!D3x4DA88nHaTLC#W zwSHF_RpxOG(nCmqNWgy4?Bza@ul_?KPBE;4mt@P3K_~#7VttzT7dYDC6EE}d!?w^Z; zxx4{br^{{HHe&#^>RKBJnVK;!;K)q^{2aWmw+f- zHM%CeXmrUy*JV7QCXX%Y+#_nRWU#kA$nGFHwlKS*@V(5siX?jImLM;|wj+#gEG;`% ziztWmGBcgk@`H^dti?)edU&$xi4%G>;2@?UF&On7Jb~jv>pJ@3Ca9y6fCs==aE#b` zJqA7Q&0*L3Ui-HnuCN?e5+?+nPQZ!LR{q`HPsI1Tw^MrqX)vV1Hx;}1IpOdZj|*^a 
zdi#6!BE&1>Nw~O#J;cS1AC$j#kgYYEuh{)Wqh=y_rrOe!oqJ0lKia_#Nr#li_*&{^ zW|nqg)o@3>{ym}NiFeYj@~Dq1*}i`-Af~9=1A^}^BcKoLHPG%B8Ec?g0d*43s1`ZwTE8*k8gjoX6XHk3pHW{) zV`rw74W6vD@@kVUNR-PVZ84`V^W3S{#uFxlayrStjB?bQ+O5^DDa`@xZBH#TlYAzt zY|+l-Ts3%m>S^?o4Oqz z_5}M5#->HslF_EWh^Yw__2_sIJXL^11cPPvPgzm+wj~2;RoVh=wxpBqAEA;uG7kBv z=%nmT-!i)6uEk-3ff@*%LeWw;&)ec))T#(3j!CrZ%x2BCva&tS{2~usSXC_RjL!+r zDJH$Fl+aZN78O9uEE~rJykTu(7e&~?%I#O;ui0!5*gvv3)wfFFZ6c4Kp}I-)kmc8# zb3guasG+x8P@_#P`%{>9P-HPdRbdvEW3LSvC&6oF&CHK+o9>6Bu~ZWEH(a&2Ebn*3 zX9GXyVQE=b%H&is->|E-r=f7Fsfp=xnqi`w`%xytJ?!J z?^|!5wjT5!P>5PBuO=&p8_bNat_JB?d)SGUUuTt4@F+Ao)9nk0dVbtBK=Qo~7%`6{ zceO~~iRyNx2C3BFw++ZceLK}#k8+Kc4H1a`bp_v#`#uf87j~|ilD6MKB!J7_j+}EX zjBmW!Nqgeh>#hamDNj!od0?(Hl@pv>)!;v_UZgxR95bg|FrTbrFh5<$(VSNB#&TZM zcyftDrF&0*@{y*I8;F;@G{ZDoi+{>=%G6uBI{TY1%y9kAh^UNdgA-LXV zADQKYxn=ww&ClIB*qS0wU$}sQf5u2F)5^%bYvhk;zw}RqP1f6ck|;w?L&eu>;t7=_ z6mItWx}zOJel1wP*0Bvi#7agexY&F}#1a`s;3$^6Md0l6p8td;=y;F8LlAbZXMgIz z&_{O7o4;xB9zTxoP=*kMO00?Xm4%W2gb|88NiWWzSBA(pxmQt-gE5>vL(1}Y{Dj>* z*N^CN{|VE^VUeL{{%jqF)`#>lBq9_!Wz3|Jrf{vv@?-HeZf2RxcnwAN9aF$kOWMdO zsOxyfOjzqW|CPRmpx`^~<>i_pF}zn51H#>j{{+|d}%&4MlQ zF@KyaaO9wCnuYHMbuqqMekE`-V)Wu<|8d^B(~X*+Sc7-&97pD9q=VE{5*+1X1Yb?@ z6Ozs>D~qDk9g!2V_E{;jm}l z>fBHFHVW!J+r!|oF*zwIhJYf^!NEaFN{WgoWd(Q))|DRiWHr%;Wuof2ny+CGjea5P z&XFxG66|isuwVh2-!BMr63|QRaLTa)_a)|mOvU3c3<2R)FZNa30M{dn;hbRCqcO8@ zu3Yc_eERv5iroeohV#A$d{5M3xMEL&z<^-F4Q@CEBJ>B zz53m*L&`~MP-NxGksQLXi7;@VHg@hb)Zpw#jCd;-~`B&1AK8oWNoBi?ldzz6|2=FKipQQp{3KO?r zLJSZ^{O1fr0Wk5~)il6jO{0OYkE=zBx!jwWc8w&0uT+sYbgt2ldg`$(blPR*G!>?v zuiKt~7mx0Qp3f1Gc%xRLbgVhB3dlST`W4H9Al2CJ7dM|1#}l$>o&ZfU#e@TOEiJ8g zMZZ9o+T-c?9w4c;qNb)+X{;C)Tq+#)Z~r?N<0xZ>2u1;!_$~nv^L}}?^Yi4mVP6+5 zOVUd-vle_lbp=@+~_Z?Ic=Ic z6`lR$ob8cX#hmJbX9I@dAHV>(sb!;~m0b<@^jQ0uARmm{FB(oCS!@L8r=|H`$7fOi zN`byrb;-3oQ}tEp))`#w;+4b$7JDSy(n-|QbWw}ecEFf})Lb0W!KK|jx_VP8+ek2` zaNye|bJD4quy;7r=F0Au&29r2RIo#(ERv@Bd}WYeUrykhy)%n3X5#|ZioZFZd6Go;KUC%G=AqC0dx;7JrN5MPnvtyt%p=Q-8q8dvB38vn=bbBfFqI7_YR=kQMx*VI91Vi4SofcXK- 
zy~#_UJ3ovMaNx%8#4&>1{mC-sPA0yo#de(q?9c)Y0b@Po$&C3a+{B;xd0&T{E+5}? zbP}VHC0#wFOnvu-X)rNzCcHZRcj3j9)c6hb+yO0B15(8Cb&qdHly*Js&n@90f&F4V zo4r>Z_o2K9G_0WGhMx_yy#B)E$U_p@`ofZ59ks{L_=U>rU?2blDqr*j&LLg{aRc_^ zf5Q=0xVd4$hgr+Z%U)P_JWo3f>Xzdyb8$EW{!q&P0Al7zZrg&pX`Y?Y1uq_BkHqd5 z;Q4Zm4Sx3{e~oy3vHNOEL5^e>!OR}y-hL6!xm7!v{qv+Fs3HIs4VPkzE%gDBL|?Zh z@`G%CkKMt_!@{~e#=inwHD*hE5dw4yL?BiW@JU4@JG#nhw}3gA3=$w(9go?~v%B({ z9S`UyYCQT>9S&AfMYeu`_)YvFLGw2PB&sy;AtvfXOE~~nrJ!XA3rmv zBF^BSF*@OMDya%ji{ah~euQl;L}AWz8ca(|j620OdApmG6+nzFSPfPStM%L0&9gIl z*H%O1T4lkCu=e>za0|qesPA?K7E)1e4=y`6JlgODW_G^+vd)u*)p@mJ6-ERLa99f| zff&Hrg98JV*5s@6jHtv5oI$Y$%XuM?1qoV0Wi?S@Ece!fUyzP?OGk}L)1+mJpRN!3 zlO{$vxGILqc&lcNU~8cJ*d#kMUz#8h+iIO_*J+W819ur;bu@P>-Ap3QLl0>q6EW5| zqHd}+I?-Q*RtBH$!>Y}aT-$4{d}vU}T70JTFsX5M=tRXx&UuH9MuNun*(YL_JhzD{ zVji6K9QJlQ&j-6Jyhmr>m4An-8oaL%KNC^Z)ze9JGo$43p%y z5-X_+^4drEKW&Y!HmTIn720*N?4|mA z&z}`oP$%`&xKjd`Xw75tcoIB%t+)lEHD=OF(NQ0BPed}TPYCb<+ulj;%=YD3$9$&z03MQ-XR)eixP zgR_zEgA*Dwv8&?~IKxAWb|Ozc&6cHp?EReZE%@s`BmGY|UmG=mMgXX~=$)X=K`=^) z0+u}8>sXuXUfDPUwghaJF_nZxWC%+ocUZP>G+mvlV20w*Utz^tOY^-ZOTxkT{x*-D zJWgK#*_-R0$U`y%A-<>Ej$J~>&z>(t44MR=&fWfT2eV(Te8N_Uym8eLtg}dE<)1~R|GQJo=8*Ez^jnoRQ@9vMS4$c0w^Gp)G zM#OwK$PU)>Ei?W%L=tY@Ji5nyoc)FEI--2xp&S|ZhD=_&L)QaqfAOYY|7WN2O*$Y= z{6=h-=DzDB;=T7oVjdYn4{5A6@0@x>`M9n2{B@B{I?Eo$x?U5k_jGIv@Hj9IEiAqr z@f!z;nFHEM43u#*OpSX4lfY5FFg2^3Ui30vG0T(9ZTr_w#Wq&kQQ7l|M3C0uuDzYu zw*nEL&9nLD%J>Vy<1LWM_-^tqB%@tTKzSz(rsEB2uJhn%RQaTRARO_Cu<(6t(%|4* zG^gei9qk=l{+oAS1X!&0)h7;aM-XwDovjQRR_0KOlI@D9ccnwmNu7W&oZ4Ij2(BC` zeuIz;fU6*W@<{~|7r+Uj095SWJ{0}1EosI#r5xW0C@*mj#L9&-In0#Pe zt!y-#@Vq~D9qO>47s&3Rqn>-vs(lrkeTDs2V?=1b3yM*s03ztmG?o-C=F`g0k_CU1 zlniol>J2ewCZSO0zDZjaP~#epYOw6whqD1+zN1`uj2iQ z&rGi*!>8qbb2;z4lZNy=d8qyTB0ifDZ)5V@7+nwKI^5O}9)Y-zrzJFdZavk9A=)z& zT!sZM(8bAUDoVAP# zXS>F4d`Zd0?bWCAG9W0rJKxM+hY6f&xg9r0`DWaq^Qd|dZBcCf9-i>Wr3U=wu;k>K z9CgR=E8O#g*>6RP9+v*k7*_5&>Y4E8$WR5bkB93N<#BvC#`rvLlhYh7KEJshlp z@$vCz_Co+$QdJF4N~(L`8~~IMiLJ~4_lcI4w$?lH3M(Nqx@LnBgQZM8c~zoS1zT$i 
zrN&g2(-A%KYP=~Zmsf1qc)P(F2uApZjaki;e*sCU|31@VgC^QI8gj5@s8st+sRb_3Z^n3XmA<9jKO&yk5)M_-+#wp z$ceYM31kgG{3R^4m?|iCg-_q_udQP=0iPb?e@%7iri$qYg6BAv@490+sOji1^gp2E zrXVYOzrv&FN}0*zP3EK?AQkeM@p?w9R<)zmyB4Di8PlFcfJ4O!geyCn_9k`PYt%9+ zf%MLQjRxSYZKPcQ+TcsA8%s+|fb;DNlpTtTifR(Ld3hC*UkZaN_~IAXmSEGUl|e9C zXHV+KRpoIvFBuq$id)Dh1=7EqkX4O?X#YcnFk(gk*tmUsuz>Ie1`B5HWiZd3R09N*WbH(QQO!AkPV0L} z8yZzs!yRPxR7`?X;qKz5TF^tBmCg*BU438(wJ=PbEVY;XnVef`_=dsZJxbv z?V45ZSBY2I(jJ;Mn0|}5qC1o}LbwQ=c2iTgu{yVpW|fw;VntcU!C#5_tg(*o(4>~V zD4#B!o=}U@@7FCZX6fb-k&YbWVSNa^`gFCYAoi>}U*U@|9VFyGQ+0^oRjV{h%1-@r zS`_A2RhAK9F_KAuakz>WV@{_cwZws!AI(!x7ev?%rp#*n!M=!s3jZ+=BemDMwRjcq3|6_$y?F}jOTNc{HljQ4vK&({} zizU&TEM)M_synzRaVmf~2l(>CdR~nJ4$`VO)wE`JCOw3!$fBOl9$Lu@Cc1@qscLYK zAh;eqBZaGY@kt=2_UJn|ZnJ9O3o$Z%#wIDcspJ6A;KRL6c6ObzpyrObKT5gcfrK2R z*+oT9TmV>0*lpGvKu~L(#|%x+vKe{Wn#UaAa2l&EqC|OFw&8F zf^N=yQZ4Ff0;Zm_?bhDd04?B+-dq;$nMx)O;?tExKWw(+-}jp67pbHAcBccl(QM45 z=1%Cn<#liD?6``smVmsT^6SP$2{z6zIT6CpdN0O>=|>_kcE%(S#$hshj6(wc4jTap zOk47_cMn?lqGurW%O zrl{(dc_$jBgH;+0f*nT`kv6vZ$NqydxiBl-3SyV~iN%DIvM?Hb@u~KvD9}2N07IBr z9DG`>K|0aVCWpyY+BK@)tv(+(qSXKyOQ@4_NM$+2-AJO!9%DJO4{UR5G31kNLgYP@ zm%AU4A0{?0VrG-I!#Acc|0K(j$djIfNE~hz0eY-Rk6Ja?@o{}|v z`_zAFHuo}5aBOO3TbkY9MopNFM;%C*a_NhfNV#Qf|* z^83F|vro>DFM%?WJ~!+a(m2m?;9FZ;pmM+kXx7ll$3-M3_x4VQx4%-j%CpqVi49Zr z0JytK_Mi}o-(}BO0`wlJtpaS&ii(pjk6t>jrAMphX*a_lwZ+(1L2IW^*E4m2xRdu| z%^GKpKz+((pz&kBIa-J<*xlU?n6_?5n{)L4)Q{C#4H7h832Ck2wTK4!4R)G8^XjVt z?d|P!Hd%Z?>1njiqx!V{y(S<>yMQ7KEo29a?~q%-Rd@<(!Ub*}Un8#-64t($Y=~1M z5+S<*dgFE_s46MFQ)s2g1>MbnVwSTRy{;J`YTDFaI=*@H*=+OSm9X#Wou9}3OJ0$Q zv3iRk=V&t5)szsVn6N@DN!`U>C!HTqao)*n3=jvE$WCa~WkKa~TvwMIn$3#5<7Uh= zmxI-Q;D7RJlgq~9r4MM;&Y=vs)eCOWUI9s7Q?qfuBg|E^1JMp zz)b4%(O0X=r%42c4?6vvl4}oU*8P(f{K(9e%yPZK2D=kf_|AR4b^n>sVZZnSg%=Qq z|4kiy@fYCD-WtXS32w;BN3>=Ks~pgkZunv8NH72XCVkQg9#)+Y`IVzsQi%)_BJL00 zGXs9_mKJzyT^`SDtEGct6T?@-Alr4ZrU)NjM1!0gQf*A#^d;jB)QjTbOSoqsj|SK_ z)T;ySCd#%l6FpnB%#P91bCRy8`-Ch)AhqCjloq3kN5O5fUIOwnFWK0p_Y1ABk`>Ig 
z(eF2;37YFPKyik(8Tyf$@{ZPlQtqm6v?u(5Gm$_T$Q}x1uih-ej2jfPfBW|Bk{_B5 zJyf_M#Q^Hnjc!3D1RQb}m2UoY0`=F+y~!bOM!!4({O-q|pyw3a^{fC6dIj)J!}bf{ zP-54xXu6~keW%N*K|GNiL=!?Xu1I z4yYQ@-R17+>B-N^I$1T9SrcZ62)DlbBI#FbU+F_LsYjc%Ut2 zhwpfx$ga$!vyD&t>@2LTmm7U4p!PU%W*^Vc45VUfGlS;B6t>$l?GdurF9Luh!H13rMr^@_ z))4yl^sKCTpm<`G3l{0?dmFzKNVv_ z0qHcpIfmYQoI(NAsId39fOw(HkD?-|X;<9-bdc8aOn_QWan60d=*?-}u-QHK`V{U$ zrp{()eJ?U2I4W_8F&G7~#=#Qak8IMg1I1rG-MOr`uxXVr%|Xq=)Q=&l=wU=!R4^~_ zq{4EFnBy%cFZVZO+v`(B#@Ifx%;-mLuGVKV3D&1{0pK#>Y<}_#khijdyfRrVdC}Ufe=KTpf*6nRTMy=Bv~RtUtRms82@A1 z(4x5a2y|LIPIpR$Tv=z9wDWd(p<6*OaK7va%z;CHJ@vd7FqP}3tIp4b&GDTj0MC4L zd>-4T4DR@(EP`gJl-HWoE^qZ>%rySFU?C^hJJH^JaMhfLMAzor~1a z=WxXdlqkHxKgR~PiTpxckHew4R&#qO(?999qO}`#Nc> zLxwrp>^LFzVKG-Vm*9DCQZd&8e^j{fSP|`5RIBt6GPw9Nt;&0a_dWL_niy2@#Z~Z& z#W>7ftjImSDg6TPYnuSy^H z?2Qa$bblK9kpMYVEP~bT?v$5Vf3u7J^l|d!E(Pgq7@ne>0wD8GPe7MrddD3$irsHb zW3SAjSK=@%O5RGtNq~(MWaD=Jb}4w~e86 zEXi0b*;vyvx^9J3dYb&4--C67!i+RMKO5f(*l!-Gm92vrK)==O0o7fqPt9)h{zkZlhPtK!B|i0nW21vkz8Rk$u7P*TU)qN+Ppw%iZk_r>&K=!E zgl?-p(Rc-EXb4R|fmV!k9{PtjMdS1g=oFYECREjb#0dJ=aK32idKIeF+kfju6_e+S z*-vx&EG(047TW77P0PQrBkw3kl-~37tRm^Y#U!W{rbb=rAGh;%SxD6L9v%PBpG70M z&}loDh~&YZ&!(Tv?w*Dcm9Xfh7uE_eFre=dD?XVa{}Ld-w-UkgX*fN^_H`ii#mteq z0fXW~gG0Yle9m$}_ao8Qhq#{d2WB&6b3*cVD`)r#jAZfGL{gqU$FfZb{VQ$$0{3i) zvXF9e9yFP)!QC%+-hHpB#7|`RyzJT1`F8zwzeYP~PifYhBP_be*Uv z`p{3ME=KwL=?2Ae*XOQW?b43ipMVRYEB{FVRDZbx?N(3>EC5BcPm ziG_KB13cChJQg*9{D1O$feU!7WZFUITQ2{&4c4c5-wR{eeseurao}BPR*C`9j5tn zJtuq;0ef%d`s@7M#Sr(C-(xos8V?Y22CI@4TQX8o?-)@irdl>#!uLY`$p083mE^k(d%jaaQCf6N5H5-cShGe zXf)ik-#=aHbq{`!z%INqRC|7;dNOD}knFy3ExF#3T9dqXolYD$*Q*09bC>Is44hG2s)G#yR z=P|dm!N?Ol(66K#C=y&peK(Z|Y`BYOV44NZGXG$X^uN*+Ts9o8#bneX9(nFyDc#2R z@yeeRjRM_x{;0*=I{WcOYvX7dH%-*D2h@REsyC4z_==J4eg1G-3N&3;?_FMC#4ld( zoVLO*Bz+nAI_2ZHp`JCH6xabw{(?$!?yTqr3H&Qv;8aKo4H=CSPy|kEz8W>N;{t;MS7- z=g1FpYnK@ih2w9OLKXPU{FxEEFIu})((wAJa$aGKVEYGEqTqs&0p~3Fzp}JVk|CzbPVfO|D^S534{Xn=^%b%1fAAf1RUlY`Uu*^ zA`Zc2ExL!lrfIc{!-V*HLO5J|^Tz`S74PGPK!Xda2d#SWWr$VLYDUWxU1z8R*%}Q! 
zJ$y~pfOFPAN)>6Cm{=O~6<99hEBC68)EyX?<-XU{`S$)WltBkNRnjLohPLC!B=yk02rKDI+*26_E$r}0Eex?w;&H528ek(Z9Efc%30SkWH>55uVoL;8U#djLxcSUOrxk0{Ry>hVb;I% z9fGyV;_^zj;-8h!l?JvGY7d{Ak$sVfg$|AUQ|FXP3qUeQ1VhU^3 z!{2+F|7hpDwEap}dwyu|#5X)KI=VU=N1x zP|eCzNSv2)Ydw;zSHb-{Q#+Zbiavp>iD1n+ z#>kWr@%2Nf8n70NKqsZpo?7>m>WK{y4fVm%+vus^frgE~q1TKqbt>)C&7b+$H+bKD zUAj(#NP&~>K^bE4<9_tSeo;5BYtR7?O!8RB{ZJgyLSo@czJ{kqeACX2EPM5+5pnguNe}Zzyt#R<+{Nwl;1q8pV1|5MBOow2`#qs(-Bg7n$VYY)f%zC;H&gK#* z^D*CnOEy$NEHL*5X5*;&5&)|jBsMT(63yfYnzz!X@nT=)TShG)*r#bl%wyOi1a(H1hSQ67!ihy$@hg9ifE&u7q{n9%`#BWKQ!*5w~sXQeuV z*Lo=h9)=CDmc0oLKJ-k(+`+p{|DH0;(e4Y()AOG#7MlricjbW{W>RCc0$^0y#srwX zcAoStD5;yiNgG+?ep*Dd5#Xo@r#v}7Mx$m#1*ArZUc9^J@mw(HJ9VO3jyjP-gKMJo zcPJDppTl-LcHUuvk#|x|R<;uet34+`5b;?j^rQDh&|G@IyqU*H$$2ayw7ko-krfI~ zPIr!`&s5VI9qOvmgIn#l@7HlT$+vjT-=5hUoKk3Vn}7GWax{#2`neffGu>J7a0WSy zY8nr01{eQ(GXUPe|DO+#SzV8S9HPtN=X|29A7mXsI+!^L{=k<9kO&6W9=@ZYrfy`E z42DSrlb8>$N!*h8Q<>W2^W!A>Dxw%>3zNwj+AdlGrGep_WREvIm6WwbMr}ua{Lq8XSwz0NGs!T*K_R=|-IGkN?U$HP*4sF27Iz`OD-J!e7TuqxVmPnyq4sz z8l|9*vPXq+QLdFqc-}_(St~cCfAx&Tvrpxn-|w-PyLVsG@voC`3X58PZw~WP!jpA@ z?V=mv6HR7(J-XRacu9Gi!;|BPhx1r5keQQ4dBg5+xj*XOI0vPKFxPenP~{4>KI{3| zCPbCnn{;t9?<C zxyU6hVR>4$m$9B@U59+Kx6vuR>?FB3P;I}E9fxIKtJ6M!@071Vmab=E_owY(ZQ$YA zNoW0qEVgf&Ys~?KHK`8lzMOjzTKC?)iD#5a%``dFDZ7C;?7TV3(I~A{RSaXe`MAke zqsdFW#?p;D;8%9`*=aA_+M=hvfrYfi0uI!Gx>Z7i_M}$%H<8^d=}EXna?HgTy!>E# zGbv|as{cC9@5QS8G6r@D+dIlHCe8_lc0 zE|{%ySIwsveoHwsIbi_3`uXm!m4fnzZ=TmLF|; z2+4O=$K>|}v7gfFODy_56fyh6;S-sunQyc=HEw5PWkX0992BVeR>59yL4}bec}m8_ z`>9$7OBVYI4iyUt(ecYuj+Xu<%ls&jblcQpai~rDONYFNd+|41nG6P%C};E|^7tlP z)}q)OLOAUM8KM#$w#ntUr=JV_T-2|OOmNaATcR&@)5C(JYhorlTLtPJLO zETVoPy2|@2h9Q9$RyI7xmlM+#>f@u+H1&OzIo+@yrIhqaB>4EA&G{Ke>Y&c0Why~% zWJ>VUwnzJ?&^zSreH1v)IkYH)O4TpVkNIT#&mTVg5!AAMEP^^&phNYEK3ZB_^k$RQ z=n^^k`$&!nr-LF*w8$GPP%4x1Yr~ng{7Xf^bV-~WWR&!WF?36Fd>+DhW4Spw_w#6Q zRlZ6Qae4=g}5F_29lN)uW3YtCVTuHSA_OS$aCKb@p!G5OTcN+?K#XI!C2(sdXVohYt@+hw5hW zX+hp_aE7N-kHoW(rX;bDlHKJsCLvtM>rf3LG{6rCB?wCo$73Fu=|F@+){NJrI=0&o 
zyAI%>>4RKUuu@PEOX4fpFS>AMS$f5+`pEQfS@3G__KDDkyUlHEZ*@fs9QE>74x_wZ z6{0BEp9QldW(Q;5!OW3_X%TEO@O!!_ms%xRdOISN{XKZW^+_Re z7rF+dF(uz}PoP@O7D9?)O^>)_GTfhjb00)Jn0U~AJd^(vk8J!$w=@QciRrFX`S{JwW4rHn^03Tm!wkOzT%*7 zRzz~rVmZIq5(qpJ4PRfw)20{rl7ilgu4iFj_#ycGi^+#~%|BL3vfewOm+CK`qL$dP z*OaciD)4@VbeSlzBIsX99C&xJbfnBZsAb@~DFFRdT+Bv^NO^1@aCWfPSVWqGhNF_} z(dYFdcJntG?)n<3<6_@jrC5~|ISa+z*~WS#q@=Cgp?j%%2N{heWW+8zwKVl?I2Hhj zRtEYt`#kgBjv*vHKkwI-i2B@}TfW_FV^U@oHcEv5=$Ihe_7|1#E&vuOX5xS&R$5C5wp}O4By54G7dzk<;ygI@gP-_4HE_2BeMyVWdXZzr zE9et)^cSri5-JJc^8FC~LtDkpd*e_4Q&46+*XTR&Y8jM~x&mov)R7H33bKEUkzTxS zY;2zdQSpRQtzso}$+IuVN46Nas|dHoMi2)w`_}xW3Sz$%|Ps(?D5G zI*jtyGRW6)ciu1t(azF=_Q!j7LSo3wPh{pF)*gR1H!}61BJD7@u+XlsHT*z{V``{1 z9@tOJ_4QRo0)Sj{W*x|U7t*j|-(;n^5y*w(G+}QmkNuoS&R_HR056%R4u41Zc7;|p zs&64Y^5`bx_L#<%f<9rl_-a{Z8EI+5KHD&hx{D=tK{<&!w&%IVDWM%jTiOSc>oF6@JBa?hU-=ocG z2D8_RtHU^b4Z%1OC#hJAprO@3Pmenf8c|e`n;Ucl!oCJQG;m^mz&n?IRX5)KJP$J= z*RiK%c!p%g1Q4r))Cm*}`BL<4kB`C3So0;Ai$j75QhhiUS z;_^S4oNO2q6aKmHqdCzw8Tv5OwTB6!H6*A)&bo*cc=ZIQ_Va$Z=j9n_g8d>p?sWaa z?c!v&rbcK$?`SGVqJ)tUIrzr;~FB3i)Gh4p+g*NVhd!B<5fyXw0iKn!Pt! 
zM1-Ecb?WS^y2htO0KWcpgoI~d{d6hRNS?Vw65SrPkN<77(qkiZkylazQ{~T@s4>~V zGC-}TcScVY^?}v?-$P&utCrCcgV!B?RP+CKJzUqTcGRkTSX^c^<2jMIK3U-cJ&Y>_ z>bN5~KqUe^I2J2Sa2q}Z-j2UpUmAmmN9&p z+8T5Te2(UBmp2cuWWC;Xj^<*dN_VOPv0y31)>BnV)>t;DvtJ$ojpgOr?8;X7&sK_) z&)@Vub!F8qao(AYt~>an0YFZ3*TK`XFOLSyimJKeJid{O0Ll0@E zi$+oHkC7DT{ix#ZPj=`|W;~7HQ(L+jKr6$IKVhGG=<1N91c=KTK(K-)Urt|7OIg(C z_YUAD8>q@#IOHoO>qbsF7V9xcM5*dL`H&aA(Mg=Mw=U!3nk~Pinho_nn;Z`dXt4kh zI;elsN;SwYu(E2y#=mzsV!4+cXBnPdu8JT190%*vA;eA61WHjU<2tqAs5Gp}Sm18t1~vV5 z@G)J}G5i82Oh3F=Tp#Ifo+?tV?kf2we3>|}-SZb0e|t+uVml7LGI>gpXf$i20i=SO zyM(9xk#lficGRceoNY{T7K^RV6zs2cCaY91h&%jICC4E?mA~%csi$RFd+@hq6yaL@ zt@>+h)I}$cm^JRLqT&RzHpRa;B_o>BmE;3N%7e|`?l#IP-9;^56XauHxQHWT`0&vN z7dY6F-8|+)${<#gZOdZZ;ZXx=-e~T(%fsTkVk`O%MIzNHwE~QBMiaHpKhj@Tm$i|G zDnhT@RN2M*LhF7Ph}<#@u>K6}zt8M{v38bWQFm>-zePd>L8L?&NIaeEvd$2blHtXcD)>pHLBd9J}2aw*v~&S@yr zVC%&LGsK138u->lL_}&$G01rjf2jfie8X{s#p_xV-RC5FG;ftED#CG#w@aJ3!fuuF zc2vNJ{bjz>iu&}l1&5_tvvum#u7EA$e1c2GW$%}jz8JZ#MrY%A(2Mbk9`7{C6X^mP zT9L98wKnl|8%}>2tIrUc1U1UISMK?^a2I4?l;D*lk6WMSp%o(_mLJI=L!D0}pq>0~ ztTNJ74>L5{7$KCz$)4{(9hbTU<;=8CQ4*i}NGV^glxis$wBCi51rqOK_f<{-6k<;~ z-5-OPewg?nUR%?%CxO1mQwvs|AA#q+w~`?XD1r|@>|IUra_eq`t!mK}W0grjJ_2o& z&Zi2vAbT$nrSb7z>3TvHTNTgOOQ;ORD{ZB|6^0#1j2#QN?L>cqT(n%dG7LI!m%nXm z4PxkD*^y6Lr+Q5Wx}#kd#A^U{0#_dmzn-5*p0;(rT-Z+)$N;ye$!+;1hI7{9ijP7@ z1(j{CvOAB|JGp^Ux^TL9or{X6O=sez9t7bptOkz5K6$FYYUS*%J@sdx%Tw`2s(+t6 zjc}9`Ft0KCxP3l;8@F9J!jx0KfO8?y8n`STe%vm6 zeiS`tLXv6luX>gI{o6|CutEF-UqHt-u^H)lq#`Zj?vxn$JtcP>udS?FL$;i)2_KBYeD&%wf!-0AGlX@y zrPFWL4nWsn&#}F;^MupRE-zK28>sK57irBOybntSC_=h2_&#ijyG@m0eD%OiW?1_M z4wY!CFbV$zX82$b^P;rq-*NuGH+lO<{&&*v59areebsW@pS+G1Dh#oh8-(d#?!4HXrY2m-e2Xd94schwa3 z@o#D=*H(X+*5l7Yk2cKS>+$PnK06uzeSfUL&7#1~)dRpjyC3dqb@{KSTwHdi%x6B9 z$2;vzuBoT~m55z#-Qj_@O|I!7Ix6mJ_WdF`|gTT<) zcJQU@X<-1oE3I=U2Q#UPVs=gRc&x6q7 zh54BeS_r=RV5??Dszea-Iqx-`ojUFA^M)8wvCvq+pzDTR5xULpRysQTG~sHwfAgwR zu4t-F;%YTH)(`oT@VTD}xZh@rS<&2mloPlhO&1o6aO>t~2}`h8$l|%8rYrpil%(`r 
z4*Q<^8D4sCxr^!O>D_krxBzc6YkAWby8!R$=i)!nc5@k5_@z8=^{qFYUs-$3e4U-c zz;$@hR!2*KfA3_eOV1O=J`je!h3W=G0nkEf0=}K5x4+->V&3KAV9}WLnG95Px|C=Ur57Z955A8+l*4 zM@Fe1*I?_11#cH`1-o*O=sl_`rC*?&2Wcq#GHihsU~2VOy+T7p(GGEHoW;ab;oxur6xn)@h-j2=sj)X3 zW3}Fkm67@k8R8)c*R>`CZZ%>DrfCOkpKkkh*e&_(cmKpELRNLs{*c0pMf&8ljVD>j z5Hp^nx`ad!VD67LKIm1_i*UQx3@wQE0OU!l!O#wtgxw5sHWGrYS;%DET z)6-@C6+j?$V0}?BB|wu64E*-zb64I0+%XP<+=BQN{!wT)M?}S17Q+MmE*sDuD3TkXrl;6L(-}LP95GrMsUK z52-!?m=~DXUKHW|F!K1T{St#)PG53$BE;R4X0y-$scJJ_oBEMh<_2_9i(l6|>6uB#u8;EYbsE;56v~9=Vq6v$#M3^Q z&1l-Ra_to(gvD3YE;0rU^I0HGREsH{IL+@}1-1A3Y%! zz3#)5Io^w+ALc49w??$%9{WN3hc9f12(nM0pGStX%Q1Bgv4>8#)-5S8`&5gb&r82? z*qe4)3Tk;ea$?8BV%z0}lP&5_Fx0qWVjzr_L@%qKPr>E1!*&N{bX6t7ltY`D@naU$HbE@=0?&*4PRs zm6er;p`j%)OqERlJUCuIEpMO8mu#Fh_?y0caNhe%BhtDHA?z?$z^LrDH?;^@80x z0ya{Zh57wHAcRh>iq+&sRvy~c(FgkxtT(jqRDALYe>bF(GV@jCKYcdKM%*Ojs60w$wfPSFtyb0PQx;BKirA4gcFdIH zMicZC;<>D>`?G3N$ZY-YFniS>j%W(j_qHdOu7M32P0c z@^Da4AeIR)^~TXCKx8lU!a#xZ-&Z~tOMLs*rI{zW5BYhxq6EtAw2xiZbC{2(ILqpg^nl`zyXpLc$~jP7MjB{J@NTsubtGm>72FCZOnx?;4IZo77q zSIhHIxdR|fRSm1sN)8UzIt%zpCS}jyL@iyM2dqqCbCMjH1|i)|q_qX$!#%$)kd3*Z z@uynv+0L#^rS>Vr>sXm!V2it-*M;BAuyIheE_`S+EIb?m)^xMq_=_O-3nxC*sXzmS zajL3z@&v6ZjVh{+sF5pV_7u`sHBRe*N%A&uVJ2Hw_0lL!D6iX~JwwTx60{K3_X+}| zX59tvlVJI;MMdQW<<06PrPe=B*v6loDSw|=CNA-GQ)0uXp(RlNDD{Kl4WFjQ%oc9^ zzw_KgT?W3+Te|99b-iB7;k?VNrRrh|Y;wqy$=k z#*uZ-akbS8@^D2;1$*gU8r^Y4&3Q$JNscOx1TnkU-^Od%dR%A*ZQ(Y0OFTHiEL>S$ zm%j9t=)P5;yW<@fD$p(rO7Ih@#+0m5mb)<~LX>{iH-CI5sxsZ5^EPx|av-a2pG|qL z!BL#%z^0_w+PF4!-?&@Dx(0|3rv$O5=M=O=#-Q)lb^fYAfn+42R9$B4Ofy;T&;65S zOVhdv8AU9Zu2%MCbSy!r*2S8vz1ZjN*X=m6f*Dj>W(XK&-h)p(?c=fIN^KNIzyR)S9L&FJbAwrCbHy9XN{(lYK22L6LoHExh7Upp^w@GV4Vv#a%(+mYNX%T+zCR*%G?$E4o= zOk;R%xO5qjiGil{HJdZ(`fyEHutEk3^TuVM>Z0q$RYc%RdExwDZ^}tin~~6d|L|3w zEX~tQu=j7a4wpjfwkH+B{AxKEthDE{LH{h-_->mRf&-MZ6?p;GB8_(1&RKiKfRvBY zo@e0`HA`tJ>L`b%x5a*wlyY8$4GjSWLe$pyu%-T9wjFUE&eF} z@{zk6MAp3>@7y5qx)=1y**a&Y&-xa>;|}di?az(Azbjvniek-pJw08K{Z$YvL`d@E 
zev%^0DH4*btyx7Wr{M-=d8e!6X3gPP#xUvdGg>z=5=&=^v{lJ-eieSXfex@XhE^6? zb4dXRrgt(=z+1J9oGY0JP7dQ(3aJr$b6pOC+gG|d5#LRduAtP7Oh0xo$djt0^sd$M z@HUq4^U8uGo9}H`SkC0fQ{?rZxL)RM*Z6p2W~G;``q9ASve~P&RyY-nkAFHhRot4! zAQxxTs9DeP;OG{sd(XJ46AxlKpbo|gQ~Nr%W+MpR3Ql5~$%D_aAoP--KVgXVx9gb5 z{02mYOQFNc$%y*Tq^UUEu2MvWdCz%r+wIhM5i8FRbk4;`5cIx*srZKVAbFu?`dgv6 z177G8t-}pM<)SMMaH5Loua^rKc|*yD*mVKRQLf0)mhSTvU&}WrS0JmvShTN35$GoNLSHxF!Go{d>UgB^b*CYeh0!!K3h! z_yY(x$yxvR<}qUk(IuxVFZ{l`SQtHA)I3HcEYAGq9~7FBgR|i}(+;EbrA=>_KOWb% z`fb>{!L^-y92vChdP4p@D@?^NO52jaQ$2Fb_&RO?w>57sd<2;vT^)e8#n+iHtsUX1 zTNr$s8of&4-qhX7a>2K7frJLZC?8bG7M6YXqK;egHM4)P`111%s+W9M#5Z~!1wUA0 zY{S%|BVu!e9|V?hSnWnb7=Kc!Os^MCUcY!FX*zO(Lks7w#wH|$1p8I`I-JIgUe|o$ z58{1`m<`J*_O<#!Kl$+tz7n1lwXzLqoVUOm{_i!GZ=rbEEGBI<6Od+`2ccf1plI@r zbDakJ>IQDt>|)#TMqRs^Q>&@6->fw&iydLOqidglQPMH{=`t)?8L+1RJ%sAesv~}| zG&cT4pP_N9&HpuQpmO<-RX6%pTuS4t=qoMK%#3?MS?}W#o|tQ z7SN%T8{qin&70RnDc$|k2h3L5f8b#w&!#hti4Ln-#7&P+la?ncKJ6iL+Xio!k@8Fb z@_CKer>c%;9ewjU5BCQ^h@p7L=X==fO+z zHM@Hg64XyQkx+Y~^6b5#w$nSPP$ws>O2OZ6SHN|u;Tv*pwli7(@R^-|afgzkeJ_d^ zBJmIPct+#T23aE9M#qUV6sld{1)5wf)~LE)ThvR5hwMU*lYKo#7u;uAs|`=C7}-@U z=giS55BiFL4E2cAfe9w647Wd0XIEEjiNjhqM&#;Vi?Zflt$^;r?*6Xi$qCA9)|9GH z8BK%ti?*pw(N+dJ&{7>Kdw)b&f8tdHCUUA;y*mWuoRE>sQAPewsH1C#PTkssxLUhd zY&mAt=My%0O^fR%It`vdhce)5!^~Xy6@M6cD@3Wq11=%HSgl9&4DV3U#JR=;XHvn8n1$e#b4jtvYpN^?dC~5FX$t8tIX=yd<1JO-svsogWo}oh9^ejZP?QzJ|e- zRTxroL?~9GRsX)0%kk@OxsD#%Kq+)ov$A?ItQ3CM9PyqE{JwV z3Wjjj2jWRQ;H?FGg zq7)WL(IWv&(NBbfV`H$C(?J{0crzcdUv%{>8cki$6mzgtr}6i|wRE{HAt#B*h+x0& zN9?MSn=>bHs#pyAtq&tS zFmg+^rQm`cm2($~(+}jX?~PB;A^egSZ`t)Xt$9|TQxDvou}8_?gyWxT8&X#Szp{(D zv?0PcWAnAIN{Wd^mu!j=MidqStr>=U2zznPOR$2e>@36B?!Sm3MD-|SurN{me$pc< zEx1HYADdirg=+hC$yR~g*ZRr<+7hNbVAB4AJ3Fg@B$(x!N~`zNTsfaYexqkBvW0+G zJKdgl8CfA;Xx8MR`Kw6=r;X;k*NU9TQJCBS4%@%yuy;SA!!7?3{5RhSudUo*^cN8eNsYcam#FxBB{elFEpNfXOTMFFAmX+B zranJ+fwQrWTc(X5BGMh@OW>+h1z9@8 z8-x}xUT9B$|8=eI?Y4!k&lVUYT`vivv)G;Qa#e~lHv$mh{tT)0cS!y1%^l@^9DmZ= z72q=b?;((gE=~Q5WDyEfJP5@DaQ4SIZ#4#A*yUa^>Wrtj9UR3FRw;loGp*cu)#UVD 
zPdo`_*Ai|^Omn-dS{oTW_nLZ{8M0Yh(KnwiSUgjC-J(qv=h1JtI zdNNO1PDaY%PYg18_EVoaS6FW%a%pmMnPP*BGgMXZ9i`#wd);3gR`b@>m_F=6ihjfc zdnAzOJ-t0{4l|m$1^-l$w0G%6{G|Km({WYpdn84rQZQV6Ze!Ea_#tu%z_99be)2GywHfw9wuYXLZjVyn?kXxF7+Q$#z@XD~ zI(Ne7eO90_=d$}-=iwzIUS=xAlv)#{Gm%#3iXaCyp0F~9wsWTaXP(r zeF`KrLOgzxCLx_&xL^ZHdbzhEAzRLqo2~fuC?dNTfaWFL*BlA^{8!d}OKpn1f4JUQ;k}0uztMAm`nO z;c2whL>ENZFI&K1;jzfvnH!!pAMn}jsffuP(78V7`dqGth6!mQO%KZ)1MtUK-S$1!$ z5;AyNGI+y^7+1xd+yr1Y6!qS`nyWG&wVz%DjAzz*;2=&o zzQFPoAs-D54V52lHLlL?-SSXSqVSE8C zD*R+;@wP-P5?8uwsY;lD$)0tu0L1;OA1>SQSC|ZF#FiWOW?)Ptmap=kOb<1NxSOIn zx_{1mFgodaXYeIXeTNyXc!2ZWmDY(Vgr6XYHu;wB6*@%hfLs$F>I3e$-F$S zufWOx@f*uy+jEJ#vG4CX``^CU8vw%3TM^vn+r8`VDPQU8Nyoa3(#4B==aap(@&!~J zsYcTO;}UCJ>5JMy*`YaKPsiAHSxf6h#ahXePnVB&ue!_x3Tj!2r|LdceM@eRb=Xvo z*VPf8Uz<~fVa&S zxPZj6*;mqc=H_H9mRUb$gb-S@DL!PKQok#OsS-S&tg1>a;7fJhNo4&Tg@al4-IWtV z$?441v9kUZfuXwJN_djkqx&c3(I7VcGlmAlci8i_5~t7til%1sPwx0yC34#_Fl}Tu zM0{i&CwTWQt$56DzKZT9u#CC!WF_Wy8voB^GXPgsI}!qe99Zpb=E)X{=QL@O3mz?E zYOec|#<^-fpJxG8Q_jcn=nvE4?3$`B63oyVdg7+&X*2)Li8Y5h1Sn|y9M{8`s*M<4p60J#u)jj#xjQDs(CK zf8sp8#FDE90MMrKMD_;2aVzrEXUR>eZqNW;v!iCWBVL~ckmZpt9;bT}pfV6t()5$N z;CpFf;O+Hsdl2&eXU}95*N;+yB`!4e-}5H197|(Y_QzGC2j@>zr1o3TSlKvy&MeUP zXImGi19NjJn4O&u7KGPoTfIJA9!Bq5w@VCWRHlNY>*g#|QZ6pz!KN!Pm$KjogKye= z>U9kApVU&vR5k^@#Wd0H!gJR}X@=I`-<@qYUDxCq*%=@2^oHVlwzW}BOlq}L|G7@7 z`MwACTS`NWphbX|?ufF;9ndQGEYP}Xh9d8qil3o~sZ6N#u+(Vj&Q3hb!Po{TFcfNh zaYG(`ch$< zmJgLv=;c$uY*$V>IpN+{*2U(<2}Hk? 
z39INIeO&FwJ32yMSH(MNTbTn)!7Nnc(8|cCKU#_vblm6-S~fn_w-hmj?hx{fExyvj z3|u!vSqBa^-}B?AgnW%(INs`C;jtmeV;%a-59I`4s=V7UTjQo!ELQ#=4t_XV9(if6 zKi8IosB13p&KAO&v2}+<{@2X=!~igJknwIlU3lKL!A;TXxHcAFrnw#`q&_+GdR)Kr z9EVN$Lv`o`zEzWBAMO*`ULpZ&?C*QtUw8LMwcf8k`nU$cVRgr`GtIQ41f2>q_g!7~ zC6qE88vf6}`K)gY#wKbVsMvoxIrN;>)P|~22@uWi7$?WsFNbb#?9lt=eNLG%aQVQW z3XsB2tupQP-+@tW{*JX?=}a}-Aunajo?ixSc>XAX@Uj;KF{di0?Cz$NdY3{ntrJARH=2aupWsq`<;PdnS6EClWuU(aFdUWe{@n46MaX&vVfbqmla zym^6#@x3zQ`5771_K33jW4?f*BY(+Sviz;WjLyQ+Wrk3`Zd?md3mGubnQ|t-?K!*E z@T%JKmv1FGvCV9Kn>B`Rob3VJHKu)lBccruu@jJtN*%Rc%04&H~}H_ymD#2 z@oMAtH13GS>V&>5HrT14({kZXq z(LxHTjTCpjk0_-0)uR}nOMNvRnkF?6{I$L9SfaRTyH72tMlPT7WLsX}U%rXkO5kE* z++5MLF5sU=;BB0ZbxD>=l9!`lv^G8FfosJ61qN5sN10Muf$P_W_tLL!1Fy$nNSwYs zEibn(Hq+}h@H(2~3?w?{s9H)gO+5g(_k73DNdE0U#j%L^NVI^rJZ9{?l_ce*gmZF$EqY4Tm-xz4`Wl&1JVs;L2Q5^0}s}n%ZR}xGrtfwxrtL z!wDy)gLs_su2!*lv?Shz*%B|98a|wZ7a~l}R6W4sc*PC5daweK;S%cVTRZstMTvxf zYcX?d$k4F*#Iu981Zrq;WzlbkbqQo+stNUpACE38k^k3}1)X%myn1Jq4p@>X92y1f zuI{lJApUE});&{awdlar|JAV`T7|B75lBP#4(()4*Onc)V+zl69&%WgMWZ@ z6<^h}wrhJrv|@+1h;9P{%C7vG{nSf&Op4XTm1IKRrPEPPYMH3x!z3cZ(1P8cNUG54(2-h$+jRls@(q zr#-JYtQVTx4lMS^t`<7X8JPgF$y||GdGM zr15;;Ol9EB7r7*lJDlTkwQ~K5;TplA}>Xf8D{k`UoipTSs(ea*ne;K&*GoiXfURKm(0w5f&UFo%Y7*quN)K4U2J5t@edcOjJ(8K&s zqJCDVjO$tWw8LUy$xpk!qR~;4!8Bg4+>6arw}bI9`2H0@EkA-t$^k?D(+2A4V43*tTM8y<_l;XCkt^Obwbf$gg2KBYQs;XlbN@+pK{z#($TROAV1cX5grD+un zq?i1Ko$OL`_Kv!&xOgvXe8((!)aS75^sBTB8_3>OEm1dR>;sywz=J{Srp{K_D=fyt z91L7~k?v3-OTDnJ)}pG+HdB>tQEpnrj^2r2l}h+S|fRc>Q0>-k}(Mm)5M$i``{6JUgLm=N}$V%f1_ZgZ7};d_EwBeo+U=2n3K zQOlH3SB;p>T9iE62<ubhl?!7-7@67s_Ur7=qSlUQxHD-rw~L20R;0xKvCC%ahK|py zW~zJ|+Si_eXAf4-Sg3M>O)T^9l0J3YIa%FF}X3f@BOFRej+#r`w(x z_wG(QUYbhwATkrAW%TZWgKDOFHv~0gKtoMev}e*N|CP43hHQ9-c+w%g_c1NQF1MmS z*eA$cC&bMcH#AxZ+WY3to)0}L$?tsze=|jEK@NS6MuVo*8|p0XPLQ(k2yw&-O?YN~ z+$a04KCbwkmPaTmj^n!0A}la&nTqDI)2f|&?6kf<{n{=H@?MVE#*GlNRFwav?}0zy44{TjN@cwhfg2EVr@{!C^*DiSZQKEOcg 
zB6=d3cu(0>`&?)^uJiYc$3oggy|qPj<#j0peuvY6q>S}EozO379H0suZdiGYG>oMFa+DOvJ2Fw4rl=selp$Ehk6mAHu)YLX)mopax z3^aeDcLXxaah@2coE^5;$3*RdTo?w?qhPGUEoY9yU*jKxo= zRDKiUUV{rKV2M@-a_jLAdaJ!X#KErwJmrf&GJx}X5G9X6rvX~HsCVw&ACcN7qU@|o zutb>$dL-4{y;x6ibAGKTARfJtKmY+lC}+3*@S6X)pqzDu(sw3MAa`K(&HH`8%}6Us z0|$x&dF8i1ytSr1pKQzrk};b>?p}s3w@dV6L{v?-)yz`h8kIqMrYMrEC1=oIGRK*b zMkMGK|4T08-qP)OIr9WKs$OY#!^iHpT_o_-(;Q&M;S$CcLN9cWbM^)zaaBx&H#b74xgkh!xi(zMB^XhT~$$+nSv9EjwHk|;^nxJ2D@omX&53VCBTM|#nP2N z1r(lEk@#u@9c{V?((726m)jCEU$?mtRTI)5MWR1BYHCR!A8C@BrDqPjqk=(%P!s(s zaRS_xbuHkbo8AA(3{p+= zl=kFDr0a7qbTf>*g)x~OHC-PAdhAhE()wjHYrDTOSHBM(@n{VNN6~tf9|*d3ujCNb z?Fd>FsZz*L@LR7~@M}KJkIj?)u9dvIOK{=e24mY{SGYW3IL)z2sQfI%-F*GR-DY`Z zQ+Ic^^1}C$WizE-2$MV~=WUntKVtn!wR)$Qs{5SKWlG)#>;hw>i-zls4U-nckG`ew zHjNAr0&6p~AD-MaCfrrF7J+x;3Rw-;xf>>hzmRKcs==wYO8jUnuM;s{@lV3oxgm^pp=XbI4LN3WXg71hwi&A}>e$o+;TxU(4JF@_@y`IGLy z8E!KP|EA#md!LK^4HWi?!K+p^9q{qICv#!)5O;Cs1bG!5CeJa@WN$m+7ki;i!owL- zG(CTj$iD-j*|$x=VNH!oZ6DTnnUO=}xu!EgoQivc#_i2KfyMuD*C8W=M z{gLvG>`Hs{(7S^XEo*0o{Gtnuh>P9iJ<3wg(TXSR@NHzA<<>hDNp2YO&q} zpcCDq;T~6eTqm!e?Ye6;#1;C1B01RPCYhAc&uwv-y-rfiaA1Rq{9OU;812$LsQgsr zr1KkI(@5*}HCC{oJ#Jh|%zFk7)}|Skr6`_#X%1F)=ts0ZGziy3uP1@=F#I%eo}&iBSqa{$QD6=r8NY_IG`k_Fm2b`?l~Vmt$}{{+$O& z4yFvM_xl?E4m*f+{@jsUaj2cR^1^$dqvBJrH0b5sKT`O(j>JUt93DBW-B%x)*TDcF%U+pVOzD>cVncSWAF@uE= zMdu3#)+^->**m{nGJ}2};y4;yc0pYSFwr?QwO==l@Up&Vi55a48Cdbz z+ZWvPY%snP{z3{`RU)JZlso+LtXIC>JgZ9 z;ARN;yqO*CG>%a5*>L`h2yx-iXY{?9A&)M10Pi1nmnU$CN#_&0!$pz9nbPocLZc+{ z+X6i8$d(GIA7elPaHq#oGAlWBkfVxXf@e?T1}18Ub$`t%`!8V&_KSy|Kx<<1_cu zPW?(}B-n5ZZ54m5^Drk)6031?()Y9^k+aX1b5(J?G&H}E=}d1*_-K!BaIb?0q2eEq z#P)$K7}vA~OGBHhuhIB~w(=UF2Mi3QKS&6h-*ch3OaGOKz`M}e&{x!_T5=9~ zi32Ocu6MgS9WXflL@WS|vsoc{iBI8u(cK{RZfmBhEqL8g0rs-2kdBsu)B#_U4 zCaidd(O>hXT;wb86YJ^F-3RnDF;*idI7a zVqaoEIe7DW5H(b1z;BeORkNRpq}_)nq3lgJIq|GY2YItwO?Ot9nIV}{>qViVF~X^u z#pR#$mCnD{i4_p#9!r_f-(~34@3NYC6>GapIMept2_%CGyn4R<@b)T zgxl@OR=^=KyQ9r?NX6gpZa!RO*m$0RzlC5hEStg$`-dnZbZ2rj_yy6?-U079U~vTQ 
z=MP$sdo5yOL3;atuaUiVTisbn&iZ;S6o;s{0IcHIO=+01v>;iA9i?l+h_jL=E;I1S!=Ep9-n>^o>Gy#_X6up~z_ z9}8z)qomL%s*CY8kxqpr0>gpwFV-fwv3}1RZC?GSSHmt$RIEaMX!!Hd3U99%NfXs4 zh#`7!_^#mNIw$3`h0C(5zVUy4otTFG#ChvOa&Hr}`Ej%%Wm^U{_mt3_%kc22OZmAVN5qch2Ldyv9U)3LvbGlG;7^#;7)&( zl%ye2+!s)fh8!L$tWs0cG$+_^92YbiUM!eepD|)uD?9!H24f29+?(Xu&Hg-cjA=CB zg$)!KKC#vD_M>_LN$Z`RxnIB)tEX}?^+1`&F|)dQ@2m&1>PZKX>#O%@P?EO1uCP1t zF($pu)cS+po^#}h6QBNJx;8z(-(7I|MeB+$9)j@mqpU2w_Vw_heQ8SoYFL&#YE1{B zi)xQ_>zQ+x-l<;tu=*S1O+g6>n8VPS>)LGOU?S6~5I0wSz3$~of@mb(<`3B!YVVzB zv76I98&{3~8Y|tq8DUvT^q_-TpCyn<(>CpZysToh@;8@BFzhI^X5x7H?CTqz=t;MK zf*RlIM)n>oR1z8eR#QD~UnN0F{-~lNTk(R%-K!VXVyIn|DAe)u=ny)}Ipk7N!<|AK z9zT;AfJ3r|@jU*H;sr?;_XaAg%i|n^q(WydEzFX*5l?=&)jD^>1MgzE%&EK-h{yu zhOocJ%m01K)@*;)@cB%^Z}p$XduWanD!KF067T{vwc28MQt8D_rqoCBpU;OS za9@kLZ%l6@qT_8ZAb>dJp^L5QyvGQDjM2YYw$$Ot5^LP$ar?I7`w+a&8Pw&qW@OZO z8DYXY_<ZI~> z{-oAb@nrVLVMIUQ2wwK;xAt6p48ws+XLiN|xmtP-j!4e*tS(vZu&uzkCrB_`_VCwS z+b6o0xTdJ?O5(DzgJ7=woVmsOM04?eD=UtGUG|E&dc?5Pd;dfiRihUp5g4)md%jS1 zm`9c4uEF7gUuP4qsqsfyw+H6!eSsZ=3+m>FG}O<;%KcM(r{b&L+HA={n|}JD8{xTj zFQl9l0)I*-jSN0wnA$xf-w2E}kL{79ekpCfdZ@f0wAH|JyMLIsdGOF@IM~~p*O@Xx z`(f~pNDS9z$`ed#(o9kbcoBax3^F7P30{ zop-#YZbDAr&v;{DVS&I|F*Q5cac8B;{B>Ts55_#Kut!Olo~M0qi56ZtxG;R=Q+JUK zrUU7plO6L>47q1q2B(}TWEAX#d`Ls36mlm6J&3u8WxaD>_pUu%slu2d4haZnr-8 zbJ&l;FzFmZ#u31*wTl?o67HoA9LjG{&0pF|2Z}|Imr3!MGEM3io?fBg8nU#v=+P40>Ev0P zT%41%2hZMp>5X;#9G==XwV#r&(R%A{c`Jk|P>56cfR&an+I)Sgd%Oqj&ffw@k;q-U zZ}UJJ^@#rvomB2xtAjlWj&-RZtN23nJ---ZR*sBff2I<56jG@t+lU5fmebc=^Glei zLg<(}!jyS_c98U{ECwwoV-{&%U8;*C1mu}c@2a-guTvXXPu%A2&wIF9Rh3y8%AHWi ze0QjCW^35$N` z2aQvS`gm2aDsBzMt1ix_%&YrxZ2L_2+J7_WBY$fkE&sHC`MCOjACW!*TN(?3R%u=HEeWkXYz8$AY3% z4suUcfwZQR=8KL2nV&R+*AraiS7OxjL?`ptdgn)#h_V)G3R`a^O0yV7$_={|7>A$4 z(`w@|qeycagjm2e-RV#kvbzd1D|Ab8?{VL{sbJxH&ow%7kk_~!!x$I(iW_+)LjD2g zD7kIdtK_V-2Yy*#Fpzz`VvrM^-u@}-5uUj;NXW+`tCW_MuB$YMboipqu#Aic;1py|DZB-ITu{og4oNajqs6X!wgWeTk-+_IUz0LsTgpiC)2A+3 zZO=5P72Ew=gr|sJDwHwTm{M!a=)B*OlC)h|#_Kg*IxDvgu8}<1jdm{?b_{xVtdn66 
zp8S@V3tlBToEAR&m(RD3jwt(wOYbC#cc|uC3^#ub1y6aqa#o&b^-U}s6qU^<)5=|b zQoO8`K3Gq?Av#We9!S@xE0cP22oAyBgS!NR z1$TD~!GlBN1b3IFtM0?8+67O&d-vLFO&N2{Io?!E`ZbFSjWBw+ ze)r0#2CCMjrm%Y+USAj}ieg+8#a@qR^W`9CaKe}qJcurd-HPR~*ql=PK6O0#&Bu7( z>9(0?2?Ue#P$t>+E-pRoaN6Z~Zb*zIR5w1@?w_9}SDcR~2O=Hd4M(thrgOn=nbRR7 zY%xS!5ei-GxyEBA#21gJPq*z++x}L3)?%r*dpn)jDByhYNFpE${~o>JG*`6l&xy(VE}=bd_zRtZL1(gT8h1$xSo3gdBk|WM=}y{<-U?I z;CK{?ivx>no2k0RA@#D$t#jvSGn>q^5K+cX;_Q~e(ljg0h ztyR(d_3M||kV2+_%RR`6-d`%3_$r)dFV*ifx?bt?vj|yCcobjq7jSO%IpNl|zT&ef zBPUbzeKX?G_I);@f@$a-uSw^XMrbr1o=I#srlL|(oNYxUd=G4!;q9rDI&w(bNy>*I;YL*{$pR6M%ArA*3h4MbjkQ=XhqkY z_(Ii{Z^ID=8oWDkrGjs~c14&Z{?#^j#ItZEIwr}5sudnYaq~cz6QH};_ULywpi?10 zDN`~hn7)(-lHoP-W{P*|4r#gM;`O{tT$DdtOlGcik>6of8)%V+K(OgMZr7iainU!aNwAR>B zsRSN6TP$m&(huIql9<9{-@b|sx|#L8+tY39mG*L7qr`oS&f@DR&kNVQ6FmPCB@a$7 z57+E+IuAlE#?NZ;8=xpCowj&gFM0D%hXHYuTrYx(mspm|uzx!ZK^PO_dFXeO5bW~3= zYQwW38nta>SvSF~o=oTs%$&nQ*p4UG6uiG0(leuOJK45Z;fB(P(o=N#OqVrj*TxKJ z`!NHLZIO@~iR*GDkij7$4BkK4`RcOIpJ!4axIbm)C$ISLp9NVbKgrgkf&LhBxpv)R z2OqAYoKEf4$CLjf$0yZLCkoq$b=$z@_Gz*B6mt2qF8#LKH^Go?U~M^4sI8=Ar_97S@v+SZ?P zyfduna=g&|6e@s9%)2c|yzBu?**QBwIYYsJUYsoG_p#`4)6&x7wp+R$7=Yf5zHq!) 
z0e0-u-HE%LZ@3Y3)g7;!aS(rwKR0R~m+KN2(W++*S=K)7HWe}t4-YVb+P};kKhn1DsV%F0G$^z%&3RBZ-sMP$VnXaZVoJznX^QCsX#$l$UtGh2p8d{)eA@tJ6o z5*Pn=gH!^&Bo>0?TdVisYHw$xD?V7&uPIs&3lgyvGEb)Rksuivft) z!?f7g31GrA3ejpBn}xk60h}rwr~Oi5T-=7j;8$oBy;k4z*Qc!ca&2AJ*0Ynv)<)gN zQ@vJK!0SYD*)=I;N*_-*t}PR-YX92SU88lrUzsWbACM{qPqT4wAUDwN9IGjA80j8% z=jR*78xrH-G&=89u9tp!1ZE}1cRvS`oO~d`!BI-{?@-+X<~4e{a~1|Kfj#9)#N?g!{a1e_WUntHCufa7QhTDvNKF-Uxj5}r|tcv>R|c>_tPbbGE+CWHzp6$dEpj00)3^48)?EmIh6*I0t`#n1R#xtASG2fu$^<>$d zrt0-NxGJtkEEMJDWIji5JTk4;$KmMpB~Gu!Z=%#W1(1h^;p+olS&7@>gG!ZHC@P$4 zc3KjL*GQ9_L)*(vZ+M+=7X-(?n1xACpTer_x|S%^ff1^GcELQ!-EM_$o1K@Jq@11)yQ{~oLQC-Af zIeNQh8;`q7qSstX?b*r|;@rL(rzJ4_20GiKZhJZ(b8>GlB=)_&5PH7w`DFi!&)#G{ zoLrGf*5%XS{5{ff;{oS0x8eFngZavqY5)6yI0AewmTj5U6Zb!6d|ykH-mDI$%rp(0 zvr0{wn$9=9Pus?I2bFgXydGw+#ySER68v7oH2S}QkhkO@{|6BA8YCDg-RLSj5eAMTQM9i08CG7!t2 z*8uGef+fxu(4U$uZ=zK9hmbx69*H#SU^R}K8b>O~P*jrUgI(Q0Z|$Qmi;RtbLd?~7 ztoRDxRgfyJzl;XHi#S%_FB`ZNqPTSW1#(#RF(Cg<%`fmBi(X@?Z2Jo)#Cqq~AewPf zSI}5A9#=O0?x@>|EkC#Hw47h~s_uvku5+%nAP2k80tMBOUv{YIb- zno+ClSGtd9U&e>%2gFKbM%sO!JUPIxW-s~l7#Qs-l2$gS{_A;j6$Y2z+gy^V6rbOR z@Y@7@{?a9(BzPnLMYy_{we0D80DPEmEQA1L2tg(I(wa_;{D6=z`TU$bror}w%p_Rr z^y8(0w_d?UkJPu?P&|1H>74T8<9U6D<;`MUa0orS050r=E%K}FfXJ~P3eoFv?Re>U zD9TeHHjgR4(G`VV1U7T?)v0%7b7fwV3+~9tLd`VCJZC1WD?Z+0qXV#Ufk7p$UN_bl zu(y?Zt>_+)*J-IL7xkAjK<(jvf_%e=kb=NFXxYBCz%jwxpZ^Ss{s6Qu^Gikg-h<0u zpB%eVr_oOU5_jr-ZR)yj?gB@B(}B5Q9gAXane&-1D!YSY?E}SzLHp-y|2(WYhE!8S zO4qd)RKeHYg!VIW458zQ4e&<}vI9epX&?j@5PIb1WO^NlE6729AS{JEdUq*p)x0&< z`Zw+~8`eziP%oRujflHB;yTZ|S&SvUfJ0@Sn(^<15X9uPUD&5ot*~ziWNyD6(sN&v z8x%o$zFj`+T-~jor^WgMOs?AvA+WbydnM|Xp~GuerlDqTyLPg+rn9)Zyc9Z~yvmPY zesJ2kMCVcwsx4DA{zCACghTVrP!sDVV-E8b=$i1JIfV(`d8^10Pafp7yF;x_ut_RCEr$T_Ol`@X^$;AzO)4RSAkF>$;Oro)WR{{P;~N z2>|yuj0(J8VAFhGR7W zaZOo1ha+#IwUTqy+mT?SdmQxLmZQoXR1eBY99Z3Bd7-6VikwD!rsQ_r&YG;M98aJmv zQQ~TJ>db}CEzj=879e%{-G#?0qZa$HvjvrXIbgz+*n1t@`NM>R8JKIn%jt0=|7D;Ou($E_ht< z$)|RgYf|?%q#XT@?+hF&9yDZ1Nwf=pei7(mKAjL3_i#yXx9>TXIu*!nHwU#i_4EBD`eKspRP2b9+=!5l8mY^f<4*hs7U<(lu1$$ 
zwf_PppgSNsr!9TC>kUtkz5*=EdIxBxyr}N_n!{#pk?DPpQeyt1^X@{XMq>~P1qjIW zh zot2H#aKb;@+Kc3z2jl2WuH3A~P5R7a^b`AI9CXqj#13Eyr8=RbrWwN~-*q3lU6s2Z z{~3%eYes*$@;+^L=!?|;5P~YWuUIb@&RXI@^8-%n3mzr%IH0G(X|950-^9-Q`=DLP zD)8fyUE^9>_N#!8J{r>7y{xEkas3VkpQL=(mW?sch`QSidY1dz{Pq=Y#bm;;V=KCq z*z?*Ecw9}dIo%`X`=%W@~jw{!SkKEB5 zGXcraGj-0jd01$EAcj+amizQry6Qa$>07B%>gb{ECm9G;8>?&@q~ay^5)Y#qA`V!+ z`w3n8q_~cku}n&e?_8QE&SRv%+~c63B84Znic)s6N^A!Mt}7bXHW-osw_)j(?cKfu zxQ$;`pmCdpYGxYH8l#?hRfQv5ct2CH&hHPTuXao2JPur5DDN`zlj1+|+IPo9vz#0g z*sb-B(x%g$O)v_wFQN)O)99g)b>$!A%Q2Zm*4fGz&#Z^Gevw≥Fbx+wk|m9$=1i z)ksC5p#52HHQmnQlX|+eerN-VXJC45kFYehTs)LeGbTse5;IgIx+G96YU>8m04_8D z-UC4ybBd)Wv~O6NceS>ubSPFz-|i~?MCBf8W`LnW0NZDV;1iX0kHh*zoK7qLVqK4A{DQ_o}$*&)bM9X5qn0CV@l3m zlw_HKf+LroL#T27q!Ub{Llry(6!HJ@T0SX^N>hkjKUcR*_shse!0ZtHxhXQ?ylX=_ zg3vH{&a)qMA<$$sZ7$fzLJHtvdSyxI@$yEpFGKWIOJy|XBQcg&Ub@F^*5Xq&v@9sn zt4>EJfT;Th8szp*SWPL3{&V>KFZteyaA{>8*r{|6#)!+L%m=DULd%8Ph^NvHnDJ-r z^ON50pT^+MaC$$mot8LzRrPs2&Df6xIr^bVW37-M!6%oPqN8iVS#jx3w8N(4}VEQ^{Oty%{&uFeS zpQDx|e9s`#NMS5*V*Z0GAU*JnJc$d^^6%_l^H#5KF&cgfLFtRYOLW=X+@hsUCHB2r z`IJ*A76=d)0#-gij|zDEeO8S}afOdi;eXyY0piZMr~5l4Ga&{DT8UMz#CIvNQmNRc zPz(O(X?RJv0$!0frS|=aw5q9l1M)Dds(u>64R-=xJ|2Rvn7hSe%e7_TO1};|&O~wI zqOgWlAU;y>j2;i!n{XI5>n{|8nQt}d-OGzMlo?gKu%2Pi%M-Pkj zXNQ)OHLgZ^cUZKQ`)a;%!1;BlE@(Y(x3JXrDl}XZeg!<%4jZOjwoqZTuf=B+k7Mn6 zdHLsxIn}S7D@(0A+Sz$*#ljdP0S99~S{_Np6#Rwm>M!o@eE2`U9$!-Jv2=vAb- zunbCenem<&K}9$SJhX$DL6p@*Wktc$o57-#d9G}q?CNibop+}zngkR?Q$0BD1qAlJ zA>TgaQ_Qq((y7t%ntpSxv#g>;fd*^*g)Rxynnx7LII5(4=ANTE4HdUIU*a0d1G_LnDpea%6Aj zkx5!CQ^w0q$ehBSO%<)JyOTTGobBtcX>B_h{TAea=UlNZPB3)+a#Yb+AcNnWq~zrF za}E{)5BrFGRTWCLpnc*OhXYIwBXX6)^LbBq$!*=4i<5s_FHoD%$oeZzb zK^-;~x&vWQHmHg&o5+(uS3I}8WSLFxhS3~r>QH)U2f$=xPV@2{~sk#HFq=}on~ zb{<)V!-(C0_o3NmK%{%xxYVJ*F-EXd?ulq_$f9cX6isv+o#>NlLKQmGBbx^ly-5UM zAV)*kqcxb@#UIl??_O24i|wYdx(1dgafJwRU*TUynV(9Ngt!>HbhVC1kvLT9*=OOE zn}W@T=jtlX+Lz;wG1YI1rqS7$KjyR0GsMkE&aZX$#yWI7MbCHXL!HPN_^%X5jUVCb z#bDTY@qPYn%dL+Y89T_KgwT3)-qFAr=i1#xZcYOO?O^-);i*~*l^2$C=h}#=wOp|F 
zH>v!N6(y1R=4Vg!pj_oai`TNMhMJUlVz|Ei1q!x($sB4+-M~$eJvf2yUfJvgqpb7y z_c43RQ=`Ht#(a^0m;IqzN7ep^y#v9mlr;I=r>=^DadW%Vt&^Ca;U zGzP)>@1Br9bbq^=0Q?AIF~Y)YiA*0*rnsMB<%2Y$kz-E0o@OV{Gwke~S^rWzw>k`* znMM&Zlhe)yJM?409{0`Kae|p6>NdSiiU;91`w_<7h82jfAHNbaE9=nh_V@Q5fX*xtc1i zrv33Fe3*f`BHYF-SJWEWK#X!?HqR>YC0{r1ZnsXaoI0F4TRLPOjn zBt)ibLO}%W+APYFrFt8sD(>f;#+eNOIblpEw%yoDa8Z`S=V&q78A{5$k>R}u?R+=e zE{*u%Z!J4zbU@duj%|_KcP_(AjgfHAAS#T@Yr8O?K&=YGjmPCYModuE#U0?2^+@ek zPNfdc4sykFqDLiqm(8QG%a;4qs$N33aE z-$jIRQZeGh+j#?rR?lJVCHLMF`YPMb!SY>} zH}4zrz%z`x^|vcUoP=&~)o`#;0Pu$Q6LIHH3I&}HzL_~!24P&pFUsonoCGtd>6VMY zv&GgJbY4lOz|9jll3R)ZqJgUwvdqx1=f1~iM<%)&U@#q73Hj8N*0SjY6)W4(ZyW=b zGn6cvRVvmoxo<2yK$P6#cBJURORd(eg}7|v2=NmDVAG2mr+`P@%M zWpeu(=zHd~P1%UD?ed(QJE|&6$0xJ)f5wRdGV>e0m?ot?4@^DWi}U`KrG%aU%f-X* z^1z}x{g%r76#PJuYSn>MH9d2QfT!2cNjtZ7tT0lFkN0>d%ey**e|LSrwr@8@WCq3h zlwBx=W=kgwx}yb?Yr zQO@-2@kMnVoR_H7v?8)j>eELVsEh0(&>dh+3h6l;i=@Z+rQPSj$)|Wcwac0Z0inp1 zPw1yF65E$A)-NH*nx8%ED@advz+3Vk>O<>Czak)azoS=BSyEV{;bizI23Y=ub{G?E zFrpwmQRJc}A24Qf3)T3ObldjuN?BF=YWZ)4F5gw(F6(VH0s?&Qs*3e^hmIoGhPfE| zol4C!*$`OUORZL74LB5WQYhz>@5Jk}EU3kll)3K?S~~P+huAPCCMY2k);)hn63nX#*-UmvSXUUj z%tK=m{ayrjYExWU@gVcw&Q{prypeox;Ttx3cF$#bX^4@oJzArIA+XySM;X9z7yD#{oP!_Egc&3vsMu;UGJX2~BL1-$yEET77L0Lrk&=+Hg6~ zzN~fsM!mLf%Wu?+#~E{eSefWarJHsmhI>biW4;q1(bcP&0ZJahq~2 zsQ#{f&F_hEZ5LeY5%>rINClc_$er6L;uuv*uB~ypGSmb^xDC(4CG^GOwr1U1hSQAz zS`X#@{&zU_RK+r!+Ku$18aH@d!0VPm7?`Kx`991l2o@P-FMH;gCVq}i zO&@^b`jYG>k`;fJNGZJzp>28lNIYI_dsN%mj14+`&$ zW%<}D?Av!V{?PE?wty_d8V=fGiSe^w#5t=hqFN6qJf091>P%Lm23ZgavdpZBt6-sN zR*ghj>=K0wi5)fz#-*?^y_QAI(oq|Sb7AyVJ8LUs^t&IK;i2Zf#|w^NuUiHkG&VA+R}i=H%MKbO zI@;GI$$-N#rkD{5{6p(oHsf!Tx+Sym>x!HTo1)_bB1okUdd8@?QKS4w;8Wjkr-a{F+#5tD?GnCg%Q|3g5}yl*GkT8 zE|$o0ybGbVKcI@qvB}#kkM%(Vd1apSD|hFfHY8|YJ!v?8egBAvJLUZq#QAV-VQcO9 z*Tt7!uSSW^RqDDw>;MI@=I^mct43&MZ>wHq0xx?2pl^yF$Q2K2N^=P8{mR|h;(zOw z7{weCa~_R4g(^aA1pT@Di`$6S?QKiBR7|!x(vm(dF&y+uAxy2CcPl!%6lqAgc58B7 zjTRhqE(;6GEyG&v_T++cQhGbP&BboU$%!`XfYwlu!)+`<;)0?2j$s!jc1Zq_4OwBg 
z5vCT*w}mF|WI$SIF8Td;?3Jx#MqQTvSaCq5E+Nt9Y~bV8@%6W~dQEk!GElbX6^`M) zu`LFCczGgc z)Y>w%=_twZYj9s{pP{e~*bx{$W#dPG&kq|V&3oK$G46Kq2zeC}E45G*t(~KuD(bE_ zLoDxBJ!sfY5a9$0MwS_x`_>z99}psB*eQGlH?U2s`Y=Vve$VD0eRuNnkl_Y8nND8y z*R09sKO%y5eM+gqgHkfJJl$PtABLs?*`%D<*e{pcv{aQ-xQm_uV^dUwjzq{$RGqSl z^#UXTrK@2hB64st-AFR~Zhnk8!DXb-785JSc01ZZz?|!9RM29T-y5+ZZqrr2z6F*d zE0oylo4{mw8?t9}r4jku;?|03Eop;Xbxyw4Ea`!vi|m64uI=B`TDVc_(zWRGi;lr88g~NVi0myFM^{nK%MF_l6{$=(Tqx6adiR#bc^# zsK*O~4<&sii%P*Q6Mi?Q|aa~E}M4{F!CAhU*sLk6g zw@Ew&S9CAbY#nrtXYwo;#>PnLQ$3=R(-!pKpY+&(`1EB$`w~$hkyf;? zxyi}Nd3e&2o({0sgn~azt(pIcTBQx?$~2>Zs{dZngU_*{)G;p`YQJp~BOt;{EuV^u zhj+8nTNRtF$X2&U3&HvMto00%q6rKRJTJjZ{Sac*6XNie2RZ`=ks`_T zB(OA1-Ec({oAu(YAdHPfG*S(%&dkl|@Rr+kH*!M3Lq~8T_IWhfHa!fr`wCH#KUbUN z88YZKM4Fn+_7rR+_~^p4zW0B_rY00R9aX7sp`GR8w2Iz9Rx=uw?H>7EU}7bCeR+AM zy?|#FiY`2s9{gF0c+n?Ac@}g!xX1tVgD>5!zfn*;P#>nS9p+Ol1Nn@5_uDUxkQI7= zHgA0mXl%9Ff0&!f`*;CA;ZGwvBPTBbUhj@j#{G#^&+L?pQ&6J7=(s6PW{ZAUN)YPl z)>|ywWgklDIH%8Namv3-?gLES{46mf6-g))rM8uEb-6p1dE;AQpI}YcUhe|7cz`G^ z1^y47R}`5jUI>Jp9A&w$*p)1-#|(6a8kK1<|3XXl)ottPU$rdjd0Db?0Qtv$PbU*u zx$J~SO+$e1^7fpc+F;Ol51x;5ux;?|SFDsklpr$BPtY`lm;AIN=mYTS&PUkBe}Pd4 zJ6i&j9I(6T;XmWkB{J3^TeuiG944Q#28mGPJE zNDP#t=`VNX_$tb3@n!_)?bj)PMI)B!D2HYXktL=?PD;J&ULa-G|Ml%IG1KWgt@%K8 z<|X5i&MMKH@rw$7>fl&}q^IG1eEyhozZ*MzZ)-A&1vv&{ahc721X|+~VEiMPx z;A&W3%fkIx!{RVY8STxYVroOq^^c2%IgQK9DTYX6*C#A-ZJR@7G)|^b8o9oq?r~hS zhE&WWgT~+S^oi*CnnQp49!m}Y9h`0=;qr*@``Zwu2n;gP?-&wRyQVbj5;BD=t{`ZS;|S#RHz zhk&;cQ=8+*+sQ|vvxT?8_wEVJ+#(}!!g@k$Vn0?*M;*H<*O%pl0TQH>%d&YK#l^XE zM4WM(epowcRp?+Cm3$!Sb}|=*Zti-^MuDMX*f!AIqn&M%c8$BWfG z;NJ^P2X$5a?N#JYAoBa=MUQ zcKeeK`3E2wRa0Xe8cgJT{Am6y~)U@0;D$NGI2u+4&WhX-1>$Gf7ha%|VO( zniQ(`SM?9;6ywpFoLQ5TEU9oFq2dR)wS<&5&z#mXxWVTyidpyXYd3nP1gBi4orBYys|_6zrZt9$9{a1#z`)3;t7(oeQao zh-+g7l2I(a5bb0DpU?fob)H;Tg)&}E1H2bZNxZ6j%f4r~gV8SR(0p_!QKRg z!*c`UXSDm8ZwymqctT`B9ha5ld=cas5$@z^_n$;>XX?hkbs4p6h!-EWxAZgd5nRhH zuzzIxY_}iOT7b(Sr|lo{&67mP1T$X#xVSi%@r9xfsxl5eYMxZHIPoISdadr>j2>3% 
zEF~Yio{demEJMKV)D+0rocY%|X;YhgN66Y0_CpZl85+D>`T=GZ)>E*iRYE1w6q1OE zWFb=M{y#a4q6gZ5V}Z`trqRDpDU8)Hz&El^!%X)$`ZF-L?gR2z{aYQ&;D)D;p239lO1Hj|T)1rlum z1)9lF_9~ESUH992Hv3kyVQdVg3o7!}UR`%TKS(#r)!Oph5mq9zhb3_d9n~PORHek( zD1tQ4ZML-BSi^Ai@^Ph{*Mw3PqjmtidoZu#sX&KQm-3{-1eJ7ua|0Q{hkUqc^L_*S z$b#f?eLSuUyEGvz^&i$RSBRE}*O&20{3meAbx=zee^~s0)pqV_l>K?vPP4pEDp-MP z0zs!#xx>*vG(#Ymd3t~EfuZCNF)Z$|EQ`OtY{;agG3d}0ABtg&DY-1ZSsGkD+hsZ7 zJrsfv1O1cXMQ!fgLhqsfYR2W*r)1fjvEtG8Gt2k5Q4R-b3K>>7)}WVFn0Y66?G=3; zW6$gMkiiTmKpyN<6{@{k{c>G%9fL|60XdpMz|xm6c9E|oYFDyLi$mZKU0xxxF?Rm( z$I7Lbuqx^K1t#Skqv%A16}lb%)1D>e{)~?C>#zAjd%JGRN}BsRk+ z&-k?~N?YyGeILWpOPr$nJs;_{6{q_*)&Ae>|G^dbnbcXTS|TeetC-F2`FMR0%hjDz z`;D{^A)w&L4;oWL&%e|s;ObNg2&mdX+zsoCOODI26Nre2Xr{ycB=~>Pe3cbo@(T*~ z#|eBGbvcNn-J`0G z;vxbTV{t$f4CGX(Jmvshow0(ftPUXaq@$w)g1kik=U4yh zFFdZyUhTMGfjhnvgHvB59)MR|%JMFp9Iv+VnXcE0=_o6ok8-WWhSaux)&w}yf7ekM z3j-97(u0@d_h=<9E?(UxCof;9E#9+XKz{J$ZXtD{Kq?v*I>)u!*w{Gi?R=xhf+&dT zUrY6muf4az1jUa9zC+wkC+7E)I6ck{^EdZSP@Neqrl+TCtnMU4)jmfEk*78{H~(^N9QjAy z4~fo=10^}tr5xH7eK1|(O~8>~6lXj%3p@`(z^(};QQ%^77;MCDWGAa8J&aS*c5LD?EIhK1Z?*FY;0@@%350ccAtE^TxkCFBK~e4 z<#_~L8H>?$4uG(l&2AOX%r~6ETrxj|TckN<^pFK?&-7DDy~V)WAu7>oV9akhnk=X&&Yc>XfERbUNGLRkh92in6h<6m!jtl-WW=c741!_OktM0xp^G^_OO&d5UlaYX)YaKZ!PT$HU)2)TtF zoKKeOIo-W)kGn!41ziXT-8{DemR)vn#t;Nqn?Xx11C+2DmMpJ}aa!5$CW@C&l{-@W zNXo3`4?tCC@^4{$HD>t=B0S& zZHR>H$j5ha+dDgXmAO|F9_lm)h7=|vQdWhyEO}`KRy{7E9)IFdHms0=l-tS&Prb`7 zgv~M~{F}`M3yy zhGY5L%5MKP)&IWk2mjugCx3WIivl{Z>7d?t-j3=1@(>T7OlY_c z?)HS;?aO|BKA%xbj~>W!FP217B+@UR{9(%QQ~tk~V?FY3D-dfJ+(Jg zB@+0_Y!F5cQQ;`Vb7@JQW1>?4DQd|w($JT&AZ_Y*A)~$ccIw4Mz?24S5&p0`vYXbc z(_ahw@9h&40GN;Um}W)uQ>;sr6;TrsVn3Ns8GV<}+CC6919v8FNh|>+QVL|+>H#|_ z@bWpyV)C2*UgOd{82|X8@YIl1g+0CjqWa7rl^!OdfgCtJ2G!b<3vM;S`Lr6%GbV@&WAU_})WX#2M z7PXE2&zJC(jdDG~gMyQ3NCU%16j0H!htqR>)_ zFir}$OmkFkcKp8&7oAck^}!cX>^ylh(FG&tFRf~qz@10F=Z_kPN@dX92AY;;<`!=0 zX+-B?$BSGZt8>TNFRTYpVT^{(CH(dWN0738#UCbHliXV!-uc%O{9_@0w8HqrqhP+V zxaZ{z`n(kuQY2J4$GHHM%6EVy`B!+N5EW^!jB9D2Y)qtLuDKB;+Y?}c-Q 
zpxbC)bR`dAA`@+g%8S|)*BkP}<7+$joOdQWV*XBU?pt~5xF{{oKfehXCNLgli-_H{ zc;{E(An^D=CqdwT>)Q=ii6kXMgze^K2HX>4NR!CY*E50hCAzHNIw4~}s5xLlp z6j*)8n#@C^h*VH50Le(Z5uO>YQ`g)eQibf11pA*`1!&I#7(kBN?Y2B^a{$M2YLMNX z2S+9pKF%Q4xcWtsL3Gu~Up@KgLJ_>lB+WvFWHCAwcddvZ|CWqGx1z~uUQ{p)!&D1K zO{s36@YVDBS&l0_(xlDO*@$hXuJVchpCOw+&)?r8nPBBYI;cD zqm~vq7KU$Q10*TY)rVnEpP|x!AH@Lp=N&W<;-rTKh-E}-Z{kR-3WKDePB?^!eF1BD z#35gGsC50D1Uk$-B#|^+u!o(VQ&WPrJ4#C&slANbR3A&ot74bVzU!X6nEP{+>0o?M zC~8JVdaa8xUYDs>!ry3hHdh|vh5o-hc);AZ8l-ewZ^mRE+~BEI&kWLH*^QvmNCZjp z;Cy4Ki7CgSrK?m3acNwF>^^${?}cZ6UN|Le4sn~a<+VDKdy06W6T_`@eSgj~M3vv3|GzD=PorXZzn3_}>-y-xc`(X9d)y z*S7}~rc0D^)LtDfw+AI8B-B0nhs5gvl$y}p<}16iai z`-^PDe>;f(j@yJi;6Gqvzux)3-MwkPp5b)dO8}|22ZqVnRD+%Frx}20f3yYEsNPoI7UnEW0OO3ofP+$SY)USv>-DH3egKZx z<0BgzrvD~CAi>_@_abeiY(+Y1r_R3je>;0)VX1K58zThr^~nL&Z;L?QlccFXs7|py-|>3$bk;$VSuI(8e}4}o zq;ZP&UEhUsd@f}~M81xCNXvxE3BKNMp$a{J0aUxGq&UHZ|Fe69_zL8dtWa~b47FER zId7RsrugiZogKhV^nVK#dN#T}UX+{`&J-zbx}K6du~+yUEZlWCl%nUh2p|bwcc(9? zZ|A7~ckcfEy-b^3qYeYHq@*=DafLy1>;UNUA>RM7#Q&#e)#|LK0U$Q`ioFpgK9(bt zW17kBxS6bLZxofTQXu(2fnG;-O@bXZHasl$Aksr>_jV=p<~+(V|JmeC6Zu z@j^`yOoGkk!{WG~M)I?N)7$G4OT)%7P>}CmdC1e~+H>!CT3LG@UCTW`UaB892T@W6 z&whOcBn0COL8Q2Ml(>d@7T*eTfl+G)^T0@!+F30IUV8fBKC1sr!T)2wXhD%uW&!w& zanmL9+a)v5lDydLOnnP_j_^dlrcFI6H|WS7R-pg|i9mFz^`&N2lW+~e1|k^4LvC(v zwm+TnZA!6Ot%7;%{eIBg&gqB7xnz4C($+=C3gt`auvlY|O+D;o#k61Vq!>s_N;(x( z=r#hMY8_yPXE1;Lu!KIx0cejiKgQfi0x%BU0m~4EIp~|!Wd1uIGK|_B?)M}0^ZF44 zJi##JXSsWyU1!w+{BR2dp#PBtv$4R~N`hY=PHJjuwzdoimW#hAdF5XoZ%#yq(8y&v z-d@iB;zkZ@f`JwKZI3|D^LFvV2_cWuw{H&=M8;xyqT1awH00zx8BYITPX0T8|F1+| zyz?)WGPVA*&&BV3TpA3ZxuFng*C?+H4D1t0oi5xE*C3W|qcL>ECBvGa;;$o{ZHJL_ zQ>0FJ<9|}_9YM5%-sFGefY#EwjARoL zj4cG=KIdw<%1%X**fSg|@wI*!irZEc7skd{Xt;B!nyx5c6yrE9TntPEVGhye?lpQK z>uM0DQ3QI7`Hy}_fbZh}M<{;1MD`=iLG==n2H8Rh_10=h`y$+N__Fc!+YFRwayC9qqtzjfW zP%J16LDa<^mgKGE>>!Nt8V{RuS9_m$YpSMIBD0ma9mB>JG?^;9{a=YcyTIk_*G z3i69^tmpNlR5LjzPv-AV1g_0Uu2CvQo4y{P#i{zr=n@B(f245}JvH`G^S{BA|9%<5 zFrt57hgQr-7eoC(N|^+bw@9q_Y~d3m^0xa}20j<>HsWNp;`&ZxB 
zE11vzRHw3=oSc*o5S8e1*_+dK0305p4d1eEDriTB_lNEX_m=4CSd3Y46W;+TWS?UA z!{;WcXhN$^+A{#~%@q{MNHok3l2_9=eE7>}{~s<=v_uy)mI<<0msIKB-@Cd<4*RnO zycvW?oRk%tV4{9EVzU^HNlzKE7!g#AfBn$I7`gh({Y-xbbythS{!bHNxD9-7u%2{R zJomB&_`cP|qp*BT&E)__6d-XC=s9AQ9eK zqa+jK)|cC5IAnZrD6i}NE-w)ejG#D|YO_s%_A}TXd5;oR*rOiC^Jhs5xu~S%Deyq~ ze|{-oAln=Ra40nFbWXb=CzjP%(iMh{h?@6KFo^SCX1^g|mg5?CYc?15kg=H~6LRKs zqx5IcCG$xV0MIAywxRUZUpss(J?6tS7B6~6IoD>t!dq;H4qF7zMo}IxQkq*B^uG62 zU0S&O<2J`ZSJQr8DDft?OL5Zq3rhq=&`-)u$H?XbKwvvTkbi z0fkJR7MIdCT6+H4SQO_1Wfky6wG9uc_MbkA1pv{>{~i;wrz08++oP>kyHa_U$Ck;Vv!+e8WkJY z=K>=KKsVuO+GpS^V2rHEh`qANeNu``Q?f@GnrbqDpZfqlpj_=wksQu7=5-OUZnvLo z2ZDucaJ=2)$zHPf+$Ej5ye!Vwy9QLBQ>lN$0v#JSRe#~Y1nFC{y;xh;WyBtW|3lha z$5pj$?c+*F2}pNHBS?pA8l(}BM!H3j+>~@9-Jx`sq|zas(jn5F(kb^ zw-%qa4bC9)AiUz4kYJym<<192(0jNf)UOSC+Ln0Y7c(V7hSW4QkTTkwjf#D^T2hr> zl3jgMp1F4lflUtM#O)*H1m5nIG+>K+uwHr4Z^Cfmzdqn#d?;k zAq@`4{g7|zosm+O#^2fnR-p7F%{GgzSk#gLRkcdCc4{jEA)jf2-swmPYf{q7)7Zh5 zCXJmy|MCa+qo1V1F~k42XJ6W@R)Mfb;PjhuH%Ck5|H%CP%TM#)AbeZBnsePV<-m31 z`26^d?0+0eFtLkB!!gVD2lEo>*obyl{xkjgi<|$0gU$Cx(6kKH(kPBpK^x7b`H#>Y ztP_4TWd|LG>A8Z~v9O>Hv`okhXs@|`jc>J$<@$U#|ApDE=k9m(9e|1~i1D({Q zq>Ifw`1jn+7H6q~<^&%~{=ugIl|1x6 zDS$~KbU0?u`mMP|Jiu&ion|dB?kpiWElNYVyLlDyi||J_ro1P@jKZ^60e3(#&+t1v zh(r>3!7;sxD3kcf%)J17fFhL66+^ZJOM^^|lr7g3FZolF5Kn(XWq)pyi)@+yK4XBZz+n{=f_gTkCM(Wt2gEj?oU5bPMW*bn+T z3&?le5@`r(dS%R>E2x);pmZ=-XH$Nr9JQt9_@J-6z!Dkz%E?>)ad+VZ4E~#0phWZ-J9Mm0jf&mFTopc@)HGr=g^jPrU-7i8W~k zl7aQ|-0s_@)u6jgUM zUivVYiJ^MoD~r3R4lCARx9*Feht=H@0b{8G&n#?bb6V}>43{6)h3IHaN3DCPF%`Q6 zWp9|2lX&zL#OxyJCXFb+uwrud!Q=Ih?dcbryi}0Pr=NXhcu-*E^hW+fV#<+o5v8t( zb?yu`jS?}ss?D8xp`L=BvBwc#deo5yFLwI*!@B~aXL7PGC1q*91LT6B38L&cCc zfJH03@>1$*j3-H=L=mM`$+J#H+3qI|cQ*C?H|(F|!RS8P35q{K0(e$-5<~{xUH~bhmh!OaPY2yOX`k7R5ac| zY-of|jUeq?qgW2SF4Wxe4fDYNzx8w5>9OA<@6yUsNx>wWMcq=xaNQ=fQR zI!umlTm(`I*!xJMyxg5$F+U_Tq~gSgel6u?QQjAQI6`*!i38>JQ2)2MaJI@N_1byX zL~Gga^&3O?q-cy!2j^3WyX>KUTAoH{@0a;W?@I1`5D$En%o+Vv<}#jqYAa}^D4!M= 
z9<=Z=U&A|&O=NLd$DB@uaJ?;L&b=W>#SZcp#`gZwymfYv$7}*!x_3XYe*hva}~j=Qq1?^PDAdk}==@csHvZ zeOgQ1!!khO`9=2 zJ?LqrdA0*P%z{-8+IHF0L}0w8Qti1}@Ztn3R?L zW3zk)*X~D7 zJu!rZB3%$BwP&xkS%mG#4ABvlsAJ+b3qC@L|4fEACgAq6e!lb~aANo;e??mXD!iK$ z6b-W*3k5Ot=bM&{lW?|AWKl$cq<5d-eQ+4lrF0Q&)a|X?)q4vKA(3{QuywjqS3mY* zmyKRA4T_ZhBCB}|SzPhm`Wd$<)Z3Y~WKiCFVk;Wzqe-)gkR&~V!V#5?V`Ah6 z%30HZ0^vat(F&38Xi_L~WFRdir8H#vd{NoU@U0m%DGMRg6hMo_h6768EYl|F%~k1M zKGv=ZC*t324Pmi3Q+F-e_md9=KXGSaBBOX<&%l&IiLV9j(k)Bh-F^-}r6tuNqIBS> zD^NlmB1B2rD@$(@SS6+?18J(Dk?BJ66PJPDHA$fY9#&}<0Tc!o_7vSF(63vam=Op9mT<3Va#ZEo4V*&wvb`Nv2C9 zo@Vph%Z5QFuVGT^1`u3^bdeuE5Rj3@Bq!SeT^FkLV{lTM+{nRQ_P}2)#4ZHBnZWF| zKrI8B9vzN}3{H5oGZO$xegEsa^egq$ioirK6hxxKONBsHb$BBT+D(M=U(Jlr1N+t@ z1uhZ-8u>VP{2@x^B2w8L!m6gJKW zz*ewP??#{izhQ>m=t*q0y`!M{uLEL!X1DnWbn87Wth4{UeFjR_nSZg)`i>rpE-e_? zN(3+tTlTP4t|>(;JP7%RyNS|ug1YpPp|Eej9nZ@^*ZyISlVsY>^WNw`trdG_N5*HD z4MaY```mT_ODuS#Z)s`i!rrHk@N{A3w~cl!`-{;~F6`cXD_aBs(PfeS`5Qolne_kf zKokQ`Mn(qk1OiBWdB@5!knw|;xOvE$a1|p5GRNQpI8p`7LF0h2i~krn@M&U_1M|>g zsGFuq4(!<~hFa2}hLSpT%dRW+@4iXUsaQR0X5C}ms$Ey7v&4)?FCEO-S^l=@?Cxfz z^@j^Hr7ZAsyPfZ+-CnJCa|mBf4O-Rzs8STVSZO?)<3t@~^*9L802MK;`GkG0w=|kL z9)8T`1lk}+;VYjrSS0mj3M(>uxo8j=)+-S_bfRGJedJ*-P6%8UY0$PaLEjNLpg`t(b5YB`+W9lQ$(sRwxix9EXs z>*hCZ=hY2ptE`}MNA7P9ebTAJGv{A@u_a-fxA`Ir%h0OvMBQE;_)E0g|L+F{rAY_Y z-xX?5H`433rWU^XrcdApTG9WUBl~D!&z2CC(lqk#KD%=-Rs3^Du1gx56 z`=HXHFsP4dHt_yO7#}Td4k)3~d_7AcL;>mFrhtN0{>GGKU|q!lW=}U$nLj5@pVDa9 z1AYNhd48*Nm{vnC*#xbyS-`uU(d<| zC9JS%{GW3@{kfeCY>|sVR(j$mff)&=(% zDch|@nv|rGy4&>=f$X2p1Z>9sUlxJ?{dPz|%>-7Ks|8<7C%0x!AH}R~rg$hQ?r$kS zqcS%S@aH?1%`0uaJ6BG59ppXFzQ@)~Vy(SY7->3ombydZ-``5=MALIYbUBI%p?XML zfS441hXm)0d{AyS27ev7Qjm5_(+U7^Tdgjmif~vYWcGZwYDHQqQGALX!;HcS;aj{L zW&M*u0FAO~CX%8*%0!{mGCo}mGS`1`RX$^~@T4^x*JMW_%mE>&N5D~FQZ3%OW=g`v ziVlebql-VuQsKLla?;}sX_O?v)C5Uz;gzdn_koH}OyGs|Pv)`U^B5vsljRvKtmxQ; zlVb>zJi0;fWJks7@EXx{(^*H|hZ-BaH>uF1=tx2+Iwr_wJ&1;C^-jkZ&>(>pRFzi# znzjpsGjmd!zFiW$ROVot_^tC|@EXE!={q0v5`VyuCPD}_9=}2bTJi1r-T`slJmr04 
zoI~YV2LtHZ^TSmDF_lp{s%dLqkr$Gse&&yEjp@*i(H+0ZNX8jX3M7kCGQv4xvgBXF zFpt63_!^@V*~G-%X`7Y8$etv@w<~=pi(2($p7IO%*9;8iYDOB5-t79dMU;4N2h+e# zf2VerEWuh@#1GiI;{;)7c(K&j;3aJBQ7M9v^V^oFF7r4?DzmJ}4jK|OvX#r>emC$P zJyB$OsIQj4D$uF&M-kzvX5&o=d?EuMw0R06g%D*qR)3@w8&j?Z# z)?{Bnon$vS)Kn?Y)TK&4=Pp63ZOdet`t5M|qhp?~elWC=**^giFBs}XO8A@?p{B&ZprMBs*K^G{lT;sVdY|{2f0oLK~*#TEmb8g86F zRC)q8lxt5d<@LDd(&E*u{COE9)_sOZra1t>u9k_LB`+WD zya(~PcZtkt>I1na)no_4nj8E`mg7_9pBBjpogiXjj6qU-0%s}nJurLOBPwEzt_wtf zPb&nuiECk=3r!p*B{g*JQe4S9=-EVW%Of%buwTu&@MU17Dzy%2pN!*EF_esYIr>qo z&SY8E-o?>ztk9hy7kh+-u>+lU6OCGlsNB)*66xpBJqeeR4TZbap(A2pKf<#YOb)7W z36k3QWOheaqaky0mHp~l@k1IV_`w?_;w&kX(XEc_MAKK;ngUt2tihyC9;!F+f4 zo#f2wWegFO4Kp!zae4c*AlXZ6M50}i96Yn52eXD^j{MvC^lJJ1m0YO1cW^acJ$51l zm%npQ>J(DM9Zbc|M>~&&N&A?EzXjOQq-*xOT~+IdbduA&|Ie6VCMUv2I8JTt4BGw% zn>T8jo>aFW>Q_?UDCyz*prI^EGq!@~lQXI&r5|HZ0YG(9w-iF5FAyYCQC0E2nJ`-G z*OD=91~r_=^7itQlCSeCi}; zd#IhMQ%Z6#jlk`A!W@K*OgL53ZIKwA-H9*o4r33-z@oroM%_^D-dpE=ef+H|Mra=jx(f zx!TjOC0{=moHoiT7tqAX=D2ITdbXO`ubP`J|8aa?5fyAl$Y@}dQ>$8CTI-2f?+Clx z4LNTn;a$4sq)L_A6u4MX*}DljyBQVsJkf7@OEmA)v_0!-e{g#-UP`_?t65u8WZh7) zRBIJsdDkd9l!d2?Jej=@`RputK^RT+Zd16hj0V>(F4WP9jCJQDHZI}fzSQ9N_-mhm z;7;9Qo2V`^^mlij#v43kBqK;xdsc4?uaWxrxhy?N*p6`yv|^a8OLns+k@C50myZM^ z8QOzr+07N=(O$b8D6_t@Y!{qgHYm+V4URgV(guHd?d>JC=_i)anQOM^x+uj`2a3)4 zw50RObv4#0L(~N{z%SZq;zn!@hqWLqbg)YkzEg&Xk<4vo3dlW5s_tb zpfkDPDCC_iuJF+DZlfuAXee@3C}Gs z+AoIj{ik7c#6^DVS_3DHg2!Yz<1VciD=La@aR*YN1UCpPpuv0f{PpO$ZoDbB>DExh;T zQDN?^Ps$tb*5H>o3&;?aScU0_qKhJfC{qbFweD@~L@|Q8ITE5MF&!s}^jXigUtl29 zW>kz{e|(7wL4Sua^>DJ992v6<5zT~~4#M-993Q&>sW5-6=}QBbPIYDFQ?`n14_61z z>BG(xpMmnGt*jOmA21Lg!0fPqA(k zTMKI97bFykpMM{B&|wq9j~&RYd*h=psDx{ch1>mu7!?K8B+lmX~3>ZC-IKifq@`>ZG4@Z%VA0trhcrg*|WMjHq z@;*^;!m^A8)oX&o)Vinb$BrR;Ixwq#psPn zX2AMNNOq4;e0j5W3f1iSqll4%{6sBMh8#|`l@nYEN>2oAYTqHs+NNWx zp5csB6PRl&v6n}gXmjLYL6y9g9)#{&c2MV8(s1*T;)W?jx0HXf!P~*0DjS4O5<%3xz$a(IFa5qq)>yu0AHGZ zhq8k8uFyD2wvP|ileCvFTKz7g31uA60}!ZH9Sxvm&!8_FUO0A_w3M-)7nEo0iuNQB 
zGWH~Txp>kMk?s-{ipLdToIM|UXLfHNLDU;9EG;!MY)Cre0V4IC5E5imlN0nasrFoN z5~UP{0}Q*@Ls6_;JbWy!4m2@A5j2R@&fP=Kxw=qk`=9&HlHme2Ld!8Yc4)0QEBJnW zSSc%ngtA>L>7w39T>-1=0oCms*c^lzkP58Ugq_F93QRG&CFX2CWVO9bQGqWb$p^b7 zCF#3Xw(jhx-tV8qBW{kP#&jC0W?Fgl32RZzBuqZEXE9{J|22wacZUTcVueWR9oh|f zB;_=I>78iXwwvyi#F;cWVt*`IxbvsRL^yB;Lf)!Pz5=&DB~#yx##FFkG1Z8QIdmVY zrQ2Q@!CBeqz@3ieE4G_;uPjIQq+WH!*z2o=znFRd(Dwd2xE7Tr?v3P|$G-r61WHHEn7^ zl;bYMBRy%sM0lWCS7A||-gb-C;XdvISNV+41B~KxY_m;d$d$%GFs3{Tq+i1@KK#B% zpU7F!n?M9=q_<3ofs_pU&Gs*!3^0?4cJ+^7;hyJ>gMXm;%=Q99xX^y^XTw+V3J?~C z$nn^X_EiKjPB|&1JNBTdir{Ws*f(=8h8Y$ei%ZR39ZK#7G?ZB}#WD_1Zm_i&c%Gq% zdQ0|#OL#NyQgU`7kQM5Xy>`tpZBADi(&{I}#qC~t?ym*$85lOeyEQ^Xl_SnI+hl(I zjb$T|8JPjJ{MBn51;GCYc^`WveBiXY#2sL!NrY&V zSesYGr&ksooxkfIyRW!NLMHQ|9l;+A$Y&q8mk+{{$|*NUQBuE;ui{7-8GXFEWg$%3 z;x`hC3Ph3w-I5H#6G%oyHMw}iCb#aa*@_TI)5NYQUh|*~W?V|Ny8gk3)lJ^;WA+{v zW}Dq!BK+N`HB9zsreseZXnB`a)#?%pu)Q^j4;KhcO3k4@sfvY-QJ+u-cxBUDY9Url z%54K{3Dagp(nQb=$0==if0asTyH#vdVOqV_0a@+ABuZm|67`aRX9{o<_DkJ+@k;i3 z10xrW^OucitPT52`WL;zDHnM;IUS!~>N-0NP=6p+MGz4=8!5*h+0y$1%DDoxwV?;V zMO^|6Zov*YV23E!vvOTN{zwe*Er)qtw1W!dyB}w!T$r_u$l8!alMZpsdMab~Ba7|6 z#@BQS%vB^$#kpcngdK+N(3%VEt9_f+?Ogs5x=#F%NoWC6%b9jV$@He-0dDuJXO2f{ zKIunG7oYcKK|^*w@BV?F&PHt0!W*?96hDUozfVsqrV0JnT44q7*t1ww1*@ClHv!Z| zgk2@~uVBoCm&ktSIVA1oIB#}_Gmat0J~ybKi7FEFH}TXV(R>}`Z+J1_=%mzhb|(;~ z%3MdnAT`KZuDpQha_4q?cR#CUA~V2M{x&9+Z1~$fI@MHFk0+{|@W|JkJl36VV8U$A zwAuAH)~+7_GVFeoREc~a9UajZ?95iQwr1_A)l&Uk#QFU1h;wp(AGS~ZD!#g*Bg<%D z+}rgr49OyRgMIiSlx09uO`j_qaXXJ$0N%jE!^^8E-~X}tJ7bP;S#%n81!iVZamw4}aj=q}fD$MzX$yKVkhqXc!Z5*qr0^8z6^ip}1M3NM#a2 z;^)+p^AG+)5^{&s47_Uqyx_pV7N1qG0c`UJf?#}k&nM0&;~EJpx~S!xo;O!pTU)A+ z98Pn7?ClA^{bY0N4IlRGSxih!Y6}#itEviK-{h^5SLjAW^LzH03pxwM0PXw>-LJe- z5qMrzRb@5TkmQ0^G&oE@`B}9Dx#2nT@yQ8bu2DMgQ3$&nn$Tbqk&qm{#Me8M6!-Ej zC@l1u6MX};R?Iz+4Z&+4d{qv3l2@dvthr;D+@LbT049Q-%W9m<(OSzlXD)@3 zx1xI@c1`=OXo|wuT%#6t1sA=|#}WD$oz+cb_fN~mUf*1;3*Ag6+{esALPBbG-9BS! 
zTCtw~{yoZiu41({!VS7UpzFi^d)90%Q||@=+h1V`^(V@&plPlMn#P2v{)eS>m2d0n z>JBHJxSjT(OSz1`|X=z<7q6xovZaXsCd^AwMTeGZ?d*20nn=xgm0@T15qh={ZN>}Hb!lr;?TUn8G|1=js;wJ!QSV|+I+wCMu^|h2=tLL1PisbM zaJ#fDz_L6OdSo%OcIoR+T{)cd&$12jxOC><@667|jiNu{eL^ZYp+QYp5d66+k4ArK zjZX5Osl0;Zhb?1lI@kKX8h-zlM;SB=6H!~!(`s}ZP)z@V&*@5I8r*Cg3}B(=#EoKa z8C^L=Tzdwuix(xiC{{x?z9#&b#=h-)V8y6hKmM3mEr0RDKtE%nTEcyti|PUc zyXjHpGcorb*>H>=$qgN3C9o=ICHa9s&uQPeMqwzU(Fr5q)Je`M^4s|Pov6$3m2Z(V zy)W}{B^gk_g1j23prMI%f|C7-^#Wr=@Io?mxW~5EB35e4RyqS>a$+7`0f)h8?9*0_ z0Zov~#=>$(`YP3=G;iZ6@zhS*s)YNcn+pA1^?Nk2;eDw0omREmHe>SWO{oRNhod98 z(p&L}4n|~99BVGQjja1>qo7wW%aqx~^mn$y&KnH8nI$vc6zFN?apFY>dIA)2hw04f zDarhK65$C^EqYkqDm%?{A4+D8Kxr0C7`Fxf)hH{ zInAHjdCTXHvX3MgXd+(-N+T{~(+zP4#Nx0#HFb%9sxsvLK7GOm4)eqDCO}gy84N7+ z8(-kNf1t?|X70aaCiD)^f=HR@%R;$nAPCd~gW>Ywi}!snXbs&M7>1-Re8G=qM1y2S z10Ok|2c)+mkab!H$lycMp$j4i{sB+v4W*Qxv{yRNJ*zvbe*qeh$PQ`8e0ak$U4 z0nHXOeOQ3FXm?F7Hw{>dDvB%0N!|uGBKx)#w38g;&_(}fF1ot!(SqPEp%M{6lF0hp zU13n;A0vC^81Z3Nskixr*~H|ieaFIUrM&XZ$~!xih0 zK?ukn`TvYYyxUP3kuJX8`WXSy?o#R=i2OxTK|RaBQ;d_x^P-?oM_ z#I=&v*Y>A1bWRj+4?hEyZE9+&9P9P^-prNm;I0J4qPdEOdDf zfT3(yP6D?T0W{qI)tN&ADrz6GBin%fcS@pRe+^E!e6&(#O>jlPX@MJr*$r-p5a?+& zWrIJ9NN=?kalTS1EJjZDL}tSYNYJZ2fdHvM2y2t3vT`dvtf0ByG@rv(Y!MH?i~xD_ zYP`1aNr7TQd^`~v;&k=x?7aIL1=wb^T~fbT3SRaXeS1{hzjzQDD7IL?=lbQcsln>{ zosuufkU&rLPq=_pxpu>g&MNV%sk$<&BByxQFHrxFKm1E(Vv4Q4&k7RiY1Vhd)F3^3 zSMtM~N@G_>?+t%@Griu3paCvITXVs6*@sn`*pY+uZXqSW{-WE1MLGmV&His>;9S}1 z-=RnH^AF0V5(@(2UO8%jqHE}S^Z;2?#mL#o#l?jWRh2KMhxx^)pai|76)hh&HZ~jZ z+U>S!QXx0LuaRNgdLPyF=gZtL!!OYAuR2`k&aRGF&8)NAX4X7oY;>h;>_h3P%h0(jK%l}LVjT1`&=_no(@DGBKrY zGyk_-X+>B01;wu|>1h@%$fdGrqWYprRMqRUTf6R@=fXzF#c8f-`Prlg+idUn^c1`T z)W=_=qoc$8-JN&}=Y0bC)}-cz+Ta+sM;B&$!3~Z->J&Y;_8|P%`h|d6#v24-33yHb zAO>`{Ejx@*cQ`cjarHxA9aqsSm%@(VzCaT&yNc3FnoS zMWQ=8PvW6fq`IGK<&%4W$_0IgMU}-TUqlWY_grt&t}p7HO)sQaf7V=-Xx8>7`tM&P zl}7(*_T---IKH-WH@5e!~J?B)h4@-pY`8RuEYH!Lk4OpqCK^6<3$?H0K!J{O}XL@7S#Q2pG5vspA?agkB=?Cm3fb=4GJ9fcZQKn2?S1YFgF|Z 
zjDy0>>2KEzOn)yY(r@+T<9`6;BnZj{xbg!PrhVV!@){dcXf;4(Gc$HvrjO}w7hKMr ztvS>9KLZj!cpc^!i69Nc0z9N95tbw|0ttwkZrG6z;Q!G;!1V{s&sqQc)2~*W;${() z!+K)%8yZzZ*C=(4dumBTU+aeBgOw(I9o85mZSjJ#rYDX_~G* zWD(@#H+n!^-aG5NSke+{;8)yI4%HQ)+&~DtbPde%5px${6A+?eh+Nf<{;ugtL)H~* zkmryd_^5AzUQ+oB6X7Q5{R0{*hC4Talh2Why_-01}oW1TwRH$8fV%!ChLL+z5&A8y={c78&~aL3XPDs8p7 zc09IGM(SeIJ=}w`_`*bjFBX|Ut{QxwVlZ<=Nzj(o((hDy8q$d_MH7!SG7p*guf!;>2cwUXXp z)k4~jlwP!&^jdSG@EzJ9U5*IpK3rsqjJC=8w0VF;cR!?tXn$4x-l1qY69W3r@U29y&JFTlCBhc;TsVujB2B(fMZnOCJd!eQojf*> zw3FUV2TU78#ob-}I#YqErpNZ(qb|wl%&2(psE0(n3Tgby=-G9XI!Jri#~b- zJT|03Sd5K(3XKzE0g(lF_=y@Ch#3ZLeOf_cV2A)DEqn&%N;o9fRCR^`c+z-E48U>z zK*_+e4@L?%wVJ3m3g*5AK#fhbQ^PLzzZ*IwsA5~8N8ItE)(MGf%b1JtX?-W1wIes9 zQ&?G4^ps7RxWsq$0WK;e*x6@#-d$|y?rD|DV3kNzdfRCJP9)M>1-5i-=E!lrZ%#E* zhdChC#X)B<(EX7{l}RT*JuA`NFC_)r>U^Z&2EU~{{>yNGI|JZb_YpM6tsdaM%&zeU zDFcgq4B(}2y!)IGxDC<( z%n`x-Za>3ipHS$CmBDMc4Po-8((T){NZVOTJ9>DTBCu#KG%ZvkJ#7{oQhA<+-0c*( z?;xN`uM@<4llYC!2u{$sOe)xWrS(&{#@!28=4@cML^+jc6;S(22U3pR-sPgTaR=Qf zi*&IF((gL$r0I8J{aztZBaFnG40;@3D4dZAMVUj5Kkgb|>``nd}Z4?t}$%IIv*OkGr$T2ctyV zn(0!&>DJ_v49I%9lg`GZi~lo_D-M>%@x17P9B&>*Cfaa~lLf&)&kVD^Iwq?XH5{Hs z&7qZj;0N5pS$-cez8QNUW4n+M=B2X_knj2Sk1#SYuvw?V046A!9#0W9YBknpkKj0|yap`+vBwDETzes==c`us(ExCE*)hXc3g6t}*OCS-AVB)nE`Z3#s>Z#Q$SlWyF0j43L&a=>6p zB7Ea9Uu^YnSlR;BOlyP4a(By!(GJarGbO;5RRL=lmll#!^ue*R@wWB*D_Uz(Hhej2QP?NH_6#hjo?R~{Ro_8Rs<9*lEf*1RZ}k$fr{e)rf6*$cA0?C;4i9f zc{*Ha;VyU;)*esnz~$Uo6W__B^_@e{RVICQ|Avjskc!o}#mFmSck1Y(q#J&274<7F ztF76@fqHhYtgOt^_Fr=K=Fa>3`xmKye7jYUNA7ub0NyK0d90fpxBX&Wy`=Itt&8%j zb$jw`8FOMVWebmTCDXyHph4MLd1=?vDRXr1HcKw)RP$69vxB9M>1ylJq9Rwaxy^g} zvFmET$*baMLoWjcwXTvBzU}&abY)p-y;O;Jn^!V}PC}|qS_p9<) zn)Qyl?(V)$>I}F|XLFzqitjW&volLfkMkiB=5My(N%{#M@cO?5A_s4z82aCjEY6C6 zO#-Ih;iuzBg*^n)UZ;eGvot_Rj*n+1iM&D8xa-ZdM*E(T95Y%C)766Jx`o!WragE9 z{X>+M`SZ2asF$h?Ky&h|dI%Pj{2!VZGDA%!6M)VCY6;eh$OH$;Sd78E45gqSQbJdY zn73WP!#&rXJi1&Ne@@SxR)N6$98_vq{3@DiYhMQ9s8cQ&0sS1PqnR{5;mSwX;^_Yz zNl;q0pE6BA$Hc7sfuhYz(vx=8la~0p@#Ld5H};Y1dWzE)+Jt$#ioZ27+^49K3XX3i 
zO#nEv#}3Ga;Oo22IvhCrP>qj`d0dsU&V819YpZPs&8Hqu*q-JlBqt?V;|1*Sf{mvU zw2TuE8>)ni-i6?wnjvlGrpm*?6_1g(>=V_rc$0Au3MjJ^iDyF@H1JvCc5#=S@WcIH z0M&vOn?wi=auB6zNyEqh#gqheEZ~QJa>}4mZzy3E&IsaSL2I~GwLmU#zjoLM!>hZg z{c8?{gDwh66(7Y0VD9s=xr}@zY#X^Sj4Z~D`{6mJfh*We3&u}RY3)qq84QhRXk8s9 z4T841Yo1+12ayoGeY=Ug%0`(RlK3W}T0AS45TB?E z0vYk-myPsvufXgsDKCv7cCGxLvwSB8F&7hlXP+6LF9>9O_ar%m(ET`UB228`g=OzU zYCkRK(VI}$kop6>U1vQn3g4n&zKel;`H4e+h69IMOs1Ro18G7^+WzYD;{v5u{ax0+ z;?L(xBKPsIT-jy$A$)Z8cT>sw!n=>LqFDx0To= z-sxAOWR)H3eM&1uKI()_`|$PQarO*X>n0(uG|y6z$Z|A$i%|EadeRzof{C-h@^++k z_xUMzT|VjOFQ_6W?QVcySG?V}m_mksEk`u6OjMhdEpZN`!!*nx z9zR1LqR`p3JtSRSK5E+C;8YcdmKi1-_oY1hnGm}wDiE7Z&$w%dscdDcxAYxCE$ z^BoHmw+9qQEEdp5OiZ`Oinn8ResNb|8_IimI$d9&+nRfT(7M>IrlHSiedFVKK9V3P zOCKj|s*;j+)4$GRJx7JBXuhe5+c@wCH;@98h*g4#85;jbQkEGgyTIQ-j0a9C+kg=o z(GWQ)!UHN*~-u2ub5g%%Q=jP7NF-uDes2p;$?Ri}awC&?+8m8t+%ZG<{ZMYdWptQo5eM+Dt zM3kkqC-qLiXALNC@di6igIg)pek{){EJq-F_~^CFN|!{1*e zqHR~ITFR0Ac~7#K7p=z76XE8wrl=?^)HP*DW6MGmwG;u1+h$DK&*Siv`i72oQPRgf zd8r3Y)2NQBe#A3gYkxACAgOjJGWgE&oooNkRlKlzomzT6{6fa9BA5YsJ9U{^99O5X zW$l!hdny9R=(D~ueaMcsM8{Qs`ks)=4D6|dkzzpE`F}H^l_W9$W3gWq-`vkMKc(Wa zyg!D@>N{YXloC7-5F^R6wQi zoEZ*$0C0S%_8$-He?jDR5$~=g{R*OD6kjii9>Opb&HehNPu11+=47C5yJcM4bnZl{ z{@x=KIZ)r@VDDzOxh*`zlXrx_+Y1Pwm7?RsK zAbq;9sxdJKJ$lURzCJnMd^Lm!%H!yt_vi}unMkI3-XNZe^NiY^UxPnf-(XsuF_2ZW z07~gM)TA@RmO?Ng?q7oB3$7eDh7?epX3j>4IT2fvz`j<5a^Y*8%V7`wM~>sOGkuV2 z?j-*3NFQL@wZyOl#T1Asqd7fLQAvtNtP8=-wKC;@kn%WT(54c;Z4Hq^L-RZc31qzx z_5|-Mejb|Ghn#Y87qk8)zrh~m<);C6KT6=oUN;kWH19nCWOdAc1OPIP9^ zbpGBFyIqD9?8zE>EggNO+4)YMNe~$@>=p?T7eiN5S32q*w=Gl4Nl@o3*17Ah&N1$` z0F7;pH`8EzgH^1;E2wi^&E{FuLcyW5yW>>xvc$#xfji()n9aoD?=Z+O*mTztJ3XBJC34e0%yw10S!x%VBk6i^QmETdP4@@XR@BU<0w!W4jZh(8*ZT=}mx*Vz9) zxs<)B$bdGTelE=!T#0!N|4q^3Fs3v8!gSB{R3=`oC^kGaV(ll@7Zyd!-Q#b74-gtP zA50Z&!q?;V?-`8&a>`bx!()qwPLH;apOQ<`(JRPlv|tD%x;>O#GBzosX_BO4ESvXt zV@rDS%+?)xn6E%Cc{82|xrl8)+x03e4QDNjZJ+GkoXmmD=*&l-Ut;rim40eUT$$x^ zbL**wT-4ls*fIITz43c>??o(4#Mb$I%c#eAPv67qR)5jZsvR7c+=dPcJ)&E;D;{Cz 
zYu|vq$K81l^S#Fx+z-;Wygpc~oTznoX}gK`tz#Iq6Y$QD{B&edNj$$eh03_qi&5+! zwS29H#I_S?>r;bI&H17gdn9*w#`4zcPfbqr6b6GmWXZdz?vV7!H&1>-bh;;H+^nAo z?QU(;s0bF^ii}mCJWF0_vq2583 z+|Sv~V7u)?fzTD1KcSLJef&czpmU_C1OMsD)Wa%Dof09eNO(rxpc7j=6`+tIX(!Re zFA*M}XP$ifl6iUAE~=Bdksgsc86*lCOVoDxWfdgfo6I0zPV)!*qv0V0`?MXCDWA&% zG4_D|Hw`ts*LWqZ?iSKeOB`xNQ7No1y-sCx6CU{*ul8ae@Hc&l#5Gwer}9WiGLy$Wo!fru(mrJ_fdXAYHbjE1~)N z^?3sEWh-~6jSVY8NewLVoh={@DfG*&rT9Xh{ z_@aL|Su6gxWbFl4i5ml1?kbD|GxNu_w@fss1l0*Ao%QwSb)9BSb7w@Fua<>xj88lH zi(M{YP+q)G2h?<(Q>6$Zi`P$bdVeH#L}J}w+xaZ+VNichAEC0pJ|Q9D2T<>h z7~)^R4NJYtW%kpM;@*H#uNM_|6)js|5R+!+Z?ES)0o(Q! zA=Q#h-J*P_Z$ku~{q;sT|8C7;FB7a*-r4c7$?L6h(0Z@u%nS?$awdB~w&^zK+<{sQ zM2Rk9_BNopUac?CwfpKeevs#tQf^*5VE%ryDk^&)PjV)yERYXayioj zj{Ykyt#JRyQ#bp^Di}d+{sL$&JS9pDs@&I$ zh^rIP43lv3Xs*#a;H5c&VHqz4ULkU%@5EkI66(G-XH)WeRQgF2ob!8u zfcp2y{16B^c$F%Q{>uVEKB&&C{o)0v9osp}QS=n+i3$Rk(!y6r0Sf!?wgC<#g*$|S zY{tKk^b~i=)u1e0jIOS(4Rb?uC_5tlT8`B@M9*F{y3kZzEcZVBlSP`VrGmYk$aT0%m) zOHvx7r9(Oe1nF)j9ny6kzV-iW{r^6D@3p=&#&L|70Wz8MjVJE=y07cHU2?AM`9wFB zAfLjJlP0@oLa&RSQq_UjO3^o1N8M+q?|rOc0E!u{1PYM;DcCkObEyF`?Jn~_RC3km zq86&x%L%^w^MHao^lORXpA}37_tU=joJNu_{n(xcUDt-R{ro~EcTR9B9c`gFdGrTr z9y{fig3y5endQBKs~b>)k-!bKyTt#ZZOwb0prdW(Bdgb>_d2j-`AAMQ2=}bm#5OnU zT^kbFZ2ImzZ3D^AeApA2-jz>i73zB_`mcDLc060r6d}(c4HQO6$py>s@|!) 
z3$rYjEUuV_>)V(ufO{6O9TiU~ZHs?BW=~sT5at_DX3tM+@JRrwwfoHSz}DNfoL5bc z{2V5rwNd_x;;l(H#-Bhj%lxZ|=$tTQy(|)`TKH$FvGX3Cy;aTrEgf{nVG3b=zv^B> zKQ9;*pg$1*>;=^{sf>@Uu8mf7vIrBm1fCQMUy2EdV@*?9YHyCwH*@LI!Ho*w#^b$T zt!#9>>V!#8_x@QDeXfMn0kd55pY7vID( zv$B@gVHlA56*DVfkH{^3GO(b8)8B z)7GFxMjmDumO^}J`(h~`;%!dD)g#7~StNe()nwmaqHRc`P-jP7ZVnFi{$AaeF@!`U z$%Ht#gWQH>fi6^SrpY* zy_YO#yY2aY^-|o?(kn@&NT~d=Dhk75q?sa0afItjtQ%QtRp>{TmR9*1!}g2oy}uZH zuS~=!m#0iSFQTen+FyK#p)^4Nx}Ppx93E;()ICcIQ;29wdB#s0IzlZ;MiO(}%PJYZ zl3i;UkjF^8jL^rE@&pIYi0YG!5l~FfM|Dw!wAO}6|4JG=*%r7d3LEI#OVfQ48a8_Y zL$2rNWic}N0e?N_s&uTg&nB@JgV_@7*#4>adB&Hh@GyA=%uIMHLq7=0Gt*@_$BXP} zY&x^K_XI8KXHUsdDmDEB*=V(T>7x2uxW-*CKGpS&<*&aVRI+e(6hm07wa6DS)1o1K zjAIXCT`xpirI%t8Kx0R42e)b3#4j0kQ6ubziflYbuz2BiWuQ1v?qskw5frJ6y`O0q z)W~*TvvJoiOg?P#C1e<-i(-JsJ6n7phB(44E{ZK+n6o>|1u3$!y%UD)2LHquo@5#+ zT9;dl^{XG>%Go6 zPUA9yx#}Dh+h@q{QY^$;0G2!Pos2&8cNWat4cmN-on~Bi2>Rn|r>u*P#UMv}dKUe6 z{C_HC*|Q?t*!+e7N&=(-xoFsSgKKukBv*YY7CD*qc&`Jr7hda(Qdr)*`g)B&db`G;G`BVq~yT->@@ zs*H~QQvIq8ro=lZA0N5$%RQ#j2PqRHN+mq$-wW0MH;_J1b$AP48MdRIIK2arOiBvi zovj>QtgZ3sXuAy$AAk$J+ZR}@U#B9!y~w@)!?rUCF{C2Ju*h3iQ~P3SrqOmrnr`0> z_xDcZe}e%=Gtxh4RY!Wgv^!gS02%;#-W>HzV<%(CRy;#N8OAHFvejt=)d05-L(5Ca z7U|7vF^VPHCzU}jWq-5)%}XnQfG{xbyF-6>ME9;L3hjB-+AtL$NfRS67qTtc;>vc8 z{~?}5kN>no@Y(u)paV^37G=H{CJprB`(&tWxaZIgVBFiZ-vx(NC0^4zu?WG0{Ozq7xvy%k05ZqaLH9R)0IFv^O{0Q zUHx>^&rOesg0&-a1v~dAOA2df(-dOtB_C_*iW?t3$F0 zVyk!CClw2r&VRvq(}+zr(LC-JvU4U2Xbss8iQuGdkor=in{zzMD;+c!?FauN07E2( z60~{%WBZVMGhSXr8ai(IHYbEK+r{n!utl)p8U#?_|J5x*%YRld_Fu2+*hao659x-R z!hWq6YXJ)6Z*f9&i2P=tS!_A~h=2u#n7WILBBab>6SOJ088PshqMl~C{LEC^aPMni z%GPzv^$iZJ7hO&drLq5!r$of7xSyP$0P&xCDZ1f_94jzWA|L5&4Q7b=01xlM>IrS`Hhrj=zBD_CLy|e18yV(84 z!jrn|LFm6%Xh@y*KG+qBpWZp=3jXPW8sOL^grkO`yPyLpzOUL#K7ML?pn@_^rnY(z zk|}a=QfTt=&MgYy;)OjB@_Z4noZDg0_Wt;lbmI?ZKTzhr)(6?AyvI@{;8EPFrb{S_5@CEn-k!dM8iqE`_9A_I7;jI^N@ zDD1qB%p7AU6az|RYa7Qe$4LCVD%7!Av zj1R}0So5WSpaT9w_m6PJTVI>|PkMm+MGql|1r99}cMMQ;q4wqZ#v^23b}BAL&u*_CF2jtBfty06&q^0}v~qg(!Ff8CQn8fl74<4-+zflcu$WWJqRtsf$ROpH09; 
zAIYP(-kR5x;@YA{mK|p)S~{YV(V0~(r%Q!*6Y<9JZmQ$a3o>m;Ky7uGn0k)P4+=Ih zO5ZXMWlmZS?RA0bFNOq7hDLk78UWnDj^x%ajWn%C;jPXi$&swc*->vbC0>$}k_hPs z{zbIR7ual;5*0yK`O9>>ZkCw`bcS5 zTGU8yzL99;2}eKCOvzN8({$pPp{HM> z6of0anxO&40qHvHy;T*b6pACVPKYYIBm6ZW4^*AS%8n(0o90_4km}HB8sJHdauCy0orJVB3n&0)GU0zrYCTUsu+Ba_O z@~DPmh8oxZ+HbLChW2tV?~Lj{1d0ztTY?u%DblP2?8|%+k7x*|e}Aa6pa;BLb$mPe zEqZ+CiJINbEQuEX&eHz(Rl8BQU*Q~?1c+<_9{`!=u}b#QF#+Ms4%rDMaELFM4t+XnK5}Im|B-?We3UG&YAmi++O_4Y zWg4#qjxdI%;HS&MT8?; zZE8=v=0N7%NZm}Q8|-{U=k&~pbBObG({_$ZAwzBQ0)9763k)3~B8OVu84taFV2{eM zkU;33;!^m4I?KhhYC`SOfPI#c3G^KnF8+j_k^7@zK$3#{-L4Po+7I2XZ6iM}p(HfI zRhwV@oR>Kkh4lLEvuE~Uf3dW|J9SAuBmsmW0N#%N3*MG`kcR=ZyAtpSK`{au*!~U(Fzkl{8;3kr^?(ARhdmJY2`SR@S3giC{pu$>Y)20=) z!b*T}R{xhz*MAfFrA?zH{p;RW7j8Yit=V!F&Qy9o^vzDA#&mMQYw_Z2d{WoP$!meQ z!8~K%WAKE7&E0&z$C{@qx1xT1R&#ObI_-Q81$HmcXx@f`^0>$I;JT}$<-4KVP)DnK zyFl(5wD5odqVkDmKtt)LPRo4Su|B96L*y=h3j1oO3YXJ@X z0%)L1S(0;N={VBjswX@xUh?|~;|A9o@wiJN`I{B~pX-gebDg?g@l=0DT#Ipb{@}gS zZyi4xt(>A-uf%U30+=1Ar>8eDB*et4;hIiGQMka4>u;q+|EufQ>8v=cJbSzMSGMlr zE9*%EmwEYe_LB^Qt-Vjy^l1ZS4P-C2Dj|B`9menpe%SP(u(7nYn&Zav|>^SQA8!LS1J z=XdJ4`Kn=Il;n=tLCO!Pxnr}l*X<~5Abx~YPUzCt{LOO=&-nLqw4R7V-%&NLfch|- zZ&a6G+ABiJvnmSnBKAI{@bfon$*$`U#h1G&RJXQPHC|hs3qL}09KOYf`Eu7iYIz;5 z+yQg8(5$VO(GwT9_MjkB{Bm0yTsIe9>zPr!=(Xtodw?k?%bCpcLepAbTXCPKtm#Z` zpEQ!kX2uU))yryUyY+I9!A>&MnZBx!wha4(b(8cp`(x&3gUrVP3y+n0Rva7=!NiE- zEB^lNTTM$k2N%~X7Z=xIC;{j#$4k1^-9z_M;ANDJEihCxtj*F0?J;!y`dSiEo6yF} z4z`2h;G&@2;7Nn$d=3IzS4D`4D(BUOsE&t+vEdJ$?jY4o>X;+bSBGo`L=P0|M81r^Wb5g>N2y*q77I7{|Ala2N!n0{fQjnrh>} zg)awhbbRc4@FwSaWeOmAF7s$SJ{gJjdkc+=N;v7qM5KmprvujYf7UhiCP(>Coe#WF zb5BmL$*oY}OB?P4wn}twKPRSoMG+BQcCnGaU2Y2^UmssEXuYwDcq)Uq(W&NS-4sor)%J-r(w^xkv&VB&bklgy?e$^ z(*5U;eh;>V#Ksf)SS>g;1vh0M%E^5xDDVVt)atZjYie{@wA13$ zqvu*Sf5`Y|>`IeIRWx+ew@0V$Jq*M9502f7t3(4YA)YPYAn)(@Ka375`VRx?miLpg z;bO(*8RWQg;n+`qt11HL^QVi@jseSOnSV>8Xz}6S3x9U2DX(%>^>ke4FSgczuY|RAhNmX16AVcJHZ1TD{4yV(v~~( zU7P8eG)q|oG~dxIiN*WnG{T$ZG!dt7EIJi4YI`%OE!Iegia|JT2ZCo=+DXMGYe!HL 
z#=hUiMNyi%*L&7>FAJXkT!~qPo=^CS2PuDj#8x)DmGwEGIZaVr-6S@&bhf;2cH8aQ z;k}1WtM{dP$r}L`tuSxy+>Qp$UJ_LY8ON^gJnjjPZKwOkHQqKoZ&pU!^&M3EGT-R4 zXEGg!P9Y9^D8l&j6hS9k{@zxiB|iP{_`uwRb3zhRp3`B`X`Mas`R{~)dS4pn!3<*w%i`Jb}ZOOIhzsPDlI0ltQYMppOp+~#Wx zj+3C`x#MQ?Qg1iXGWwK+3|0oF))R~Kc0&6V)nV2o2dr4vEFui8+0zTvVaKbVi;c}f znB~ru3~4Pb!ulU0XveF6(=T6zr!tULsTMHDp<8)^DS-0oyUUj+;@A;yD$$Jhws-Q( z;g4>+*EPw7Rg%(B+FL#UNLOa64nC)^V;+(4d$n;Emyq%D4SejUbf`JmX-Z4~csW0r zs1ZFWAzw>oj~pE>3Mm_tL4_3>Q@N7BJGH?tBsPw!39JV33W^n4zpald;};umdkCAZ zH!)H;pk|!0ahWsedWW7?_9MNavJv9vS#Epbwa^ovCe?*0ID382D+<((fA+W8$k5?F zYdf<6?Oero!&w+LHBYFj;`!zK+TvJH(F*)osFzUbtc~zV%XY+G1|&Gt2l^mnpVT!abwQ;E#-J=qpM?qSrO zoTKC>yXYpg;7m=e-PAg>oZ(k}86fXG+5HA51Z*Bj{k?v_Q<_@TBOA^EOnjhl405M7 zXdiorjZJH)PYeNZAC>NVoy_3v-K{8lplN+5fJxLTnH%F;U^>6=GFx^BR%bJ>2(fW{ z`N@hp=$?|!KxHc}AezaUs#BV8;{oj>u!o~kQ}c{xk`9RP1alyD1`i6+mSwGILbtOI z@p(NGYxr+=c&D7i=rLG>B1oj_MWYV=uqqn@+be5RG?UmVlLst>e@I2`cGf2g{eNgQ#{*N1K97RmZzZDRJ>^@dETuHu7hOg7jOv2g1ta z7`@|^qX_|4PpM34BoNJbGM(CV;HlOEN7(_tCq?Tz+A#OQ*$k>vsrito1ir}!#g)z} zL(!n$8KVl;#c({e-f>rIuNT8xok?>opRbBuBnK#w z0<+6XG0QsrZulTbFe3QhrviUSNehL+C#7cv1FYg-#CU(u-Cqtj&(al?x5h#IHaGFv;#9=manuFy;E`{|MXba8 zCGkPi*_cYWW8=YBhCK#{1{?e!(mn3cX*9U7U-_pK^@M9Ei?cO7ZzyJT*)r`wby&Ek zn|Zp+T&DXDE~vEmsB!IX&Eot@Aqz|2IKu2b{I2IgjlfjSC%U6ul(#RGFUTD!aXwe! 
zTbb=3^{nI3m9^Xn*yGA-mN@1|o2v4EH8CldQ0=O_?HQEntLaPUuAM)^snoJYK#roz z?Pz(en^Kifs+(pTj%J^P=QPG5Qn#PS5MjOGM}X#H;tXO7NZqAJ@AQ*~RWtDKWw1q8$!YxR~Y=;&O>4l{p@GkK+5pF3Qntv?x5>WSgcxCbF3`@6zAZJMs!SY`{UBK^Tq~2u z?aYcyehQw{eg5g0ucfiSGKl)-3vSPHjnHiU?wTb+RTQDxEa8}E|MDP!X{#_w*8m1G zD)Zu?P&XFTJqiFQ=PO)^k>!O?qYm}df>MEjCLgJ$jNbh02nJr4sHpHy1pyeOM|Gv* z!8j#EZ}bFNLfP#C8(7OQAgB1Duf^;}0e7S2qp%0N9dzd%KlF5btXYC2&XtvVAmn^o z5D`Y)(&^u17?AMl!N?LwVWLo?xO#3u@*4b5{&2XO?qD4 zBjXy>{dNpLM8Sq#m9 zW?MozE|Z5Qi6&0SYuN6*&!f_%oyC^GW@mFjS9|+j8@rpY8FNBk#aJn23&`ATthxuj z^DN8}JZrQ9p@Ljjgq}wM`c4RAUOtA~)hib?gWC<~(H3absF^A{Esh|w0G zi7L!$y-J3Ac!^7K?I7?mw8&G;3&=0B>RnHjhb5UA?B=fO%?SL!VNUNE za5upyxSsiL4SI<8tph26eiQU`dmMo6+STuW0Wal!+xVOAgpo#mz3%pgZ}lw?{xxmp z+N%+|&w7ozlCvrbyi+sbf($U}@o@*&hTpoxL~pz~ABr&X!@5(*j>N&$FPhaDUu>YU ze>37%Z$^H9t_=3g_vJA}qF`Eu^=Q1>!*{+Y^A*vC|1>V&NOTX1!tCP#_wMj%c;N!c z=amhXk(nJnZi6LIhFW5KR>kR(7|bw|-EWU{M2#HgYewun)yG&xgZhZOfJrh9+AQn{ zI3nYQl~~Pm^cqRueh>xJMM{CSzzP3ycE6{UnV3Aa05HrLP*;BNRj{m!=#pvwl)$~4 z$Y0B&JQdqGWS!5!ZB;=uUr@y-^Ex22JkO2IjsjG^N!MDrMT1_q!AE{) zxqeDYtEOZI##p-6Wbj_7r7y542I}rI}KwAq>cq$CCMm@R4R`pLQMJ*wAkD zc}Yr&zr7`BRk|B;-MHfRJgX8CH>y8adTdp%Mf&j@a5f&#in>_4N`Ui2|BLogI4KI5+y|*EbVwbPjO1m!0s{Pyq=sRghoCOg z&P!QIjiuqtpL-8mX-;i!EcUMb`T9bFlYj~U$=i^;D$00PPQXJ;f=HXjx>%WjEi}pg zjVr~uU6@)j3$sD6kA&&dJke4}{EN$m3AkT!V%+Vo)yTS|Q0kq@xQJ9gRpmHC&wP$@ zY0P1tLxBPD_1@gocp=|QVV%OYZ0DpGj;Cmn5dMijG@SS(HRiNYm0s&L3H= z5Nqm+<7uo-{ozd=ZF^T<7FnBYaQ2O9ny7nKkv>Zs`E)E}5mK5&m+Q|FS_D1d`x2&U zlNUVab>KhPFx~V$^Bc?S+@#xfHKH=sbD@iuVrL}WF)K0HF>0boS#OXbSNj-BCJE7e z14{h5>rxZMv-0?Jp~();*OB++zt;5TNy5S-7ojJ=QQ9jRNGCigjfen=(?ft?Ao+4N z=50b+ebidKK4=o^-|cn<%U}L5C1x>QX$JgyHWU2ceYZL#Xt}`fxxZvfIoQ0rI{|t; zi=X_W&@Lg5bA8uav~@ZKq>sQAbwcbNFhqUe)Up9zOYm*i8(>)qj4i>pZ)r1{ou8c2 zZ3~}ISGqP)P1fG2W*9GE>4esu2}#?S8d|P?0EC3y9fqJsKzI?br%M43+a=>n^rOF^uc`cKD=w+rIyvu?oDQKh`e;2;_>G2=@w$wd6Ema7z30U;2LY6rqSef z=<|D=?YaJZF3mLq(YPJ-+3zB9#XD0Nd<&z-MhG319D*SnI^V}Q}jXgTg zqPu`a#dK)xaqnm~Jt^;Jy7crJD57yb&y+=+&b!&cTx}gw>PuDrCK3`mc&ks%!9VNK 
zM4@d6bocfg0Am!U;Xhf{gnk)FWcwueVIbK06N5YX$!W(y@Jd^EQuB56n ztUdU)u&9(BA+gMz2pO`o@oirsMkE(5-ua%5Z21oXK5HPGpY&9V$u_Y12`xn@XL)g+ ze}3bB7wI;PD~NFuQa|x|TvfC6wnt`iL{nt^I-_( zroQ#Qy^PEY)!p`B!3(H135bb_McjY&#E%}m;QpFfxQ?+GBoyKSFqXf1I{cqZuw))+ ztWLs$+z0?EFpl_VBLtm{>Gz*p`T7P-Yxbu4Ok;K+4d09NdkERt*e!lVH9~tDpE=n~ z(FY>lSZs)7Sks0n550v2bYOL0?9Ma}bMBBqt)2?F*4LNjH0Jj-iQg^di*V=mD1A_9&7Pl^d^ByG_uPed~m)Bj0%ejB{!B-f5FkEO^o&1g@ z`LziCRDoQ#!HG3E1tBOuLl=+4QBc3-fi*MgjpRSgP<1awBe=|&IRiJT% zeoJ@TAkxq~8kpcG9Xt7||CQt8a041KYyBFI`WRcNMU{2cjSNHDuKl+-1@7ky?6OI2 zkS9w}>~qix<3GAXJUEU0y9uuf0EL?*#LWOVAqh0t_>ayX6Fsb~HZ}uJJZ!O!PT>JP zp7bbWgOKJubO^0~ma`$UzIH7o0cn846oP_jYgz@nr6No>4*ej3+11L1VvLy3;ENc&gqEDqYyMEZ1n@*m#K1iM>BDuBXp?_pOQ6l;y zncW0oY~$q=BP%N_Kfn8IjqPf8c|%;3c0KO*ZyiWuN8MHcCFr%LIT_>J z&gEw-&Hwnbp^5U%#O|@5VeKz8X?|AF0+SU72Q($50i8I_#Db|wrTM{&Z>N~*K-ae- zCu*41>nYHvP}qU5zJ9Q?edm%cBho%H1=D81c9AGmv6YZ$icmV}HGViF!>yy7=y!|h&9 zeXtsC#*Ll-vbQBg5=*=jZbQ;ltW z>zt^)&$O?^3LW{$|6ymyeFsvz7vPRV#_~xPHUChj#nI)W^9Z{HyyR zFd`zaE3~yY8`nR74!E+Bq8nX=INWUA{`y!h==byT#8QqxPEf8_RaX~VIMh~<^=x0n z5I69XY=X8V@jU4(bHNHquH4*Lm1pi3aS}A7F+szqnrjyLmfZwVu!SC%_=TH88L5NZ^< zo*2&mv;47bKg*iDU9K0k=3Nui;VsSyw}qLRd>Vhe+0PEEVLWLIIk`a*a&Obo90og^ zgdFk0p`XJT-g9r{BW=bvQ$Pe}{NWWS{=Q%%O$-6|KLi=H>;w&PS2-x(#6&#I4Liu$ z=K!hEifs~UetgUiG%&8KlCCciudG!#Um`ltM#+z`4t)_4u-tZDUG>ezp0Ewe@h|b6 zIaFvI3Qb_OFebtgp{o?WSX$0-`jb1ks9oi4(phT~XvXe_!ge=UJGdqYZC_hoPgT!N zf|!oR5#>CVw;a5XZe#A$Ev~DBuBW(|YZvHyUFWCY8>MGRR9Qaxnyt9(Bi61J(`RCi z{)!Q1AFSS`{6mUGgqsi>FJfDTChgB3RL-+-)Ed|b&AzTDZ%uagim!i=6QvWu5{Ym> zx8vud6Qj=?r^GynrLSI!5NEvR4oSH2BM2gLDm552Qk!%=A#l{;vtFAt@q0SKmuNPZ z|MN>nk&e_yYKAChg`RYyH0E6oAFjh6Z5_V!J&v2bAIqA@1I5WiUNA{&x+F$x)NQ?_ za!}RGimEE%<9;t7Kdq~O5tY@U?{@nAIO+vKrwQng3M~gCzXVqF`=17vSIz+CquWLvf*=xFFJ~qzA<@j%( zf}z;i0a=NIO@t&e!VDp_UnpygG9r+uzf;V>I5(55kJyT3o-UjF>mJOzJ~Ad*KU?nh zzv!Nm^t$pvEhe{H>=Bn4==Z|`6TsZ<@={#5(BN}uax#`Z0y>efS$D~DMkI7+9(``h zb=Gyd*@9dWPCAH-S-)izm(tE)@s4~rx$&cP0s<G`QlV+)@ zu&Gb7DH=01X=esg689IsJI9?pCXG@nE(K}+#XOxIc8RPu05Ls4>U$4S}N9O|O2nteTjI$udJGosV{DE%& 
zPnxHP7}Eh}Nh&JCI-Ad>gJ9O&km(w`jt#Qr9mL51pR|V4q*{CSJMM`P`8^Jtfm)}{ zs;%!K@ICQ(#diWCsE|$*IyrNRv_qOH?~}AM@jxH^J?8HDE%hh|;aw4=c$4GmWMEEI zAkSM4wwhGD;t6{fYPLD=_WW|M!WeY--tF%@(s-@)Cj{>*Dm<1(jAC~Ty;mh{(8M4>w8Tjms61ufNFxGglot{<-*?nR&=f8vAxnmH4GKNoScFpiQ?V$5d4tU2 zMjkS_`VxawqzXS2zyFjsXmEy2RhUSBqeBC&3s8q<>KwlIY{Z~D)N|X;w%+s?qh4G6 z=?sohu#!HDs5X(mJv03x#Wt|Mn1_lalHx>L!E0i1bxxL=vl6yiZy|($SeboNEOQfW zny^^~Ur&dv5~HWQ#7=Sddvw>N-a?QYu~H#7&h)A(%tOLPG=>fv9ZxNOcOdL|s*@m} z{Uox-(8pX&*(JH>qqxO0RRs^#N}g3y8EJfFx0U4y>O|A4Weas&lAj@`Z=PV9iysqW zicC)@F3a{6-URTn#m>WeRAs8rEqb7=Z_K6pO8=~qrG_2r`uGaQcs1~1-wYl%neeq# zb}IL$(hq^XAB*exh@w6@zW?*j+jdn?#zoB{3{we< z_)U+}_N=WN`2L)pz&LwXbn~?1DSPW1vk)gH>*8|@36B>cOfu4peTRff3CE0~A1p5V zL->TsiZ0%ymtDOOh<6z`{pnQydzASer!&lSZcA@L%J>*(wFxTy$Yn|RAiR4u!edXG zF{n49wO84zYmE;2BCGr;?viTxF*wf?rvX_-g?q<0Dv@OJw;aOvVf_>2(%QDUA~bSD zlB8RG3SRXfZ;D$lcB}Ib>4M$GfqV7VBg1u6>7D968^T)Rv;Nq3DlrD?yX;Gtu7@5F z)!vhMjfB-r{CM(%O^uAS8AnETMPqE)(NGO$$o3s}`dp)?y5lkH!PkB#toj!$KhG9L zQ+BIINfcH1)SP@j|DOE(OLiIIk}Bl=kJxmjxQ&niV~N`jqq9^`1j6IMb6r=%RkHN3 zsZ;vuHK6evNlVe3FHT2$izoAVZTe_dw+`9{c@0UpE9wc%PST53q*||7-p^o=J0rBG zudVD|oN?qb28~Ov%f!wjBa#;b`#TqD?u2<*UuapSZV4JV(4b*?*sXt84{Gv~vI>dW zcJ%oGl@CU$DJsp27pA>4T}`cUxgP40O&m_)blv_l&5ZKhb3LU)GUAncLbS6zijgiJ z0yMJZT^0IUeea{zb)|P>_0g(li)FXeQ{oI5xe~=Dwd3RdpXwahzyQVD+SMo zx~XV{=DCTgo|X~!$k{=|IBR8aCMdB~3-9-_ zp4HQ_s|obc5dHNYyXO*Or*C-{aFA&dCZ&C%S09VQ9lZgq+&_E9kw>DC);IU>R&m zejyCCfi8>IE6*!719QX?hhyJaAv?Q7J`O7AyEW9t-Efb0hHf9} zy^ekj|D((wFHbN`8XRAorX%zr)=be>gaBBdJJb^<2{b6`02CT70FKK za2eJWf2KO|Y1!unO)`BoSxL~E9FvnU!PmaQ&w4>EJY$XTjGYklk1ETjbWWK0Yw-?f z?+a>gin5a}Ja@S5qy@z27Jt6QaxyM=#&}BU={gnVNHJyW zyox{q{j@Ye{|pCd6y&p(>@AF57FS274A(-@aw5%vkpw7BuH{*KQipHaDSr=5SL_IK zTgRMdle(eu;{JLKSLW@xS@VmK>#(0O`8m*j8EL|mJ@@*+$~F@3%*sZVM)PDc>|?ZZ zlB`AXIpp$qeK6PQDWd-E1*!~){+%A8|0*Wwo~~?Iv>t5=*|eFl7*}G1tVB%2KmaEA zEl8|Ee(>vMy6x)H1ANi#BxM#zd%R$c|QgOw#*j;{N|i-cdl-_*Vdy zGS=#13zGMTKJiZjz#r`UJ57Udjg>e};ieGjB|WNoP<=?sNe|bgh&uoL&c9z2zyr)4 
z4r7pK)|rAs4hbZfM*mDQXH#tz08&uQEDg@4oJl&LFH(gQd>R){+~Av~9vpBr4=wGP zDa=7e8Vb&P0Ax=Cogi;==mSSGCV205a6D-*rA`~zxdk8IAM=kBXFXW9`cqQ#;m=~$$0u<1f8D>tNyLO4lX#kcqQM1; zeGf+x7zC_D|H##~wElskIJpT~fak3p^N;7fE~LvZ3;$**pXP6#jMM+GCj)|>X#IrQ z-)vt6%SviEsszgz;M$!SpUGPFbEZVV6QhC%O)%P2eNA(G9E!@TrmV9b84zc<8X0i2 zp@HviQ_1+OoD#&2?c1Y%Mc7~g>s2s!Yw5|WYbYsb7&M7ePd6Y9L1tk1lNaD2`60O9 zDQm9(gp`Hv>Q~XWbJ6>jA?>a1pzKn($vBrdnsU7R`AH;NGE1m4;(#?% z-*-A}R%NfO39vD~IO5F8xlG1H?xJX|oN~&#vMA4MAO4obd|8L(Gv9@BhnUQ<#5OLN zgO$OyZ)l_*5En&yPrAn5)7_CHEI_b*lbhnV#y0-(TS@U5C%fg3L4%Vh(!(npIqAbh z{1qc%Mfjuid&BdcA!P zLsxNqJIHZ(yT`x|wlJ=OaL#^D*QmS^jGC;yV5O6!nhv-Gae{4fGy@MAJ*)~&oL?WG zz%0aMmNv=~dpU+PMFi9cyzRc+jh!i%?T+8tk|gKWoa2OQ>GIvK+&6AT23Y-E#L5kG zu)e`i&^KF~NZ7s5nGL++6kAN?I2cB6RCkkKJHe*v(CUO>xlE58n+e5o@`t9PE_GV2=07K0t}!x6H?X zEf8#^inS9?Qu+@AKk;EYxxt|66r|XXvEEm}gPCz!4<>)`G=Le6AF!5(gB`3@x*Xu3 zeL!2v!SS%rY##HEgXvaD{g1UH7(-Lr%?;Qwyx}COW_=Q`uC6{l{*CG9q`Val4o)p( zu?3V}#_~;=n3y^)8=yxoB*#WaJAA|Kz%u$jUP=_clFkJC{>#Ub-aDzOyyFL1c-KGz zX!CN(;!=%=B{k4X$m7tT$1=;RJMyv3+H3K{{@iI9UHZ)?Rpjk;Vg5n$!QFJ#W&q7t zSXdaSN7>%_eZq>R`8$gcmOyRzb8s*IbF~B;mfw%?)Y_&i^VG>jyGDnT35r8T+ocR8 zJ-zT9q*fq^HP?SSK(0_&NEv9J?vgtQCF0S9yq!U^Bop!;4J~UKuQ8dN;&jk__G!hE z*7thf=g^-_2sk;)%E~6P7b9E$8UZ?&_z*c}% z#NpVg(zPvXI#I!{9}T{Cs1vGmKd9UDLeXRR?*#T(T7}urglR_tQ zdiA?d;Gn_dWP{~r78qpz^X=g$Y_Wm+U&qLK8#`qa%;)r{*20y5%T9E1{9cNg#pd9h zoqj61UXah7nW$)%b(FiX?Vq}kIzP;i%Vna`Qj=QtRl2lLMf<(sbb-kVYgd!YnOR$` zI9*~U)1Fcx(KWarRLSKE)VcY!xH!GWb235aoRX3`A@@Sc9d4T!j{)#@Hg$xP=)SLL z-3s6-%|)vY(&;9%S;f>C;_YqTnD3Cqqwz_75%)juJw4kWKscXOmb_O_#LUe%a1@14 z52+%=X=}g!8Esv8VFJn{;}DO{a*E#L_4_N!V~zg|H1Zf5XNKQ zD_hna-X;kbw8mGK5zJj6L>s8N06lLeYi{%!8JNPED%?S2&o}w@z9RzDh>kP7nKpj17gdHMeIy6qAeD8D0fz{GG_kPhpqF z_TT4V9wG6o_fvFD2N;B0gU3IN){tH(Qsk5`%kUE=YfIQzj%5w3S#lb^Q62>El#wEbQz0$4Q>%({UwGZR!;;Hd z&bntItvdSp=`k_KojCXVccPIgDbFkYQRbS@gK{B;wj5Ps_77Ri=4y zQ7K)c?U|frP`#*NHtMO*KCy#him`3IS*TwR5dc*X@qYxx|50uFKL@R>Wjz2}WCXPX zW(p8YS^io6{8@Osrme?(p}lK4@wQ}l7t*a@_ywy-YFPMoVdqy6n(XLun)8tBiGlYm 
z=@+AfZ(6qc%KpVKyG<`YkALZcv}Nat@)ApoE|+9wcphn_Mj!$44Fpx+vLnIV<0hx@ z&FG&0!o0_+892qzUUHRp8r;w+i=UF4k@AwM;)Y%Q>OlWe771oVShD5zk&jTl=ViY? zBF0K>$5b&zjH1YOlWpT!_>hsA&06l?2zoOUJ*0{MZMjGmd#hbzj5kEQC(zpSsJL9z z-5nu|xx=5S z3X#R*$ymCtemiHo3*o||-Lz$u7i!+ds;yIgYkoyBQ|)U27geNJ7%rs$k=$bv ztE!x&76W4sW|CnkE>9?>mt7{Y?^4pV(>K|dTB3eCy|pAu<@{85sor%bprxfQRw@+TNsz1l zTf+lHzgokc1k%P1f%ljE>QQm(%26sQ>-CsKePzrW!tOR4Ovju}bH>lGt=WEGlCSN4 z_B&Znk!N5mWjv+!7iY+wCNgUgkYcA05Hw4N#>jF|*$*g|v?%ScjX&xuWIohgcNSHV zmcJx|teY#aePf`LPA{-WFR52TG>w$EGplIb;M4pfQFd|A1BMZDLH>9RxwvFxKC z-qiPq>CZ|*`F=MlHs3Y29TLFBRZZIBC>tN67kx0yG4AbIap1Q3UVL`h-#}e+Zy0@C zv$VE4$#F*!*%h<=Zg-?E$34R}N@GG@nODe0)|#m>QxM@)&Wh>bHZrnF9cvf}8a!41 zE!dJ5#qs*nE?d+Ok-hMMeNl~1Weo5Pwvr`@((Z2houA|Kq9n)tKZy}nY!o(XZI*Fu z<-Q3}40*VIUKR_FM#O4T`;$r8YSL=wI}~NT3l!?{cOw~1=-fkz5?l50YKc8u^E3$N zA97Ycqd^qCch#)#Ha=!+sp)MTblac9La*4=-l;A_Y+>?mA25}B_0gRjMT7ImFT_*#_ken8^i>xZW~PL22>PWX5mo_~-_owMmSGug(pn{*v9Mz78tgZeQ&jK^cdSo2AO0|82~F9#q)D#uem5z^soC|6CgJ zJUza6GTX)e&-|I*^E9bsRlp#*oY5&MU9cN&xy^_q`Q-3MI$wP9Jh}MtG|2uThQB-`HT+3153gfx^(h}u_4hC`6PyQLGxKX+Lp8L-K!`@ql#kFnQ zq6rY(0>Q2D1b5fq9s}d!KIPLQ|6+qz{itAdD!H@UJWsTVt`*_$UzfO8zktF z8<{7Bq}@VNx4wnEI9$CD*H7P zL$Y>YDe1i`e7eb7wY_`cD^EV8pdTMiMr(h!t>cLFwBZ0K;DcC&_;E?Us0(MNjqnYI zF2|u^&NIU1fa>Xw^=?rEd*Qf`nP+99Fs^aun}iVcK3+T>m`B&AL4L-5E|)2bX(BbY z@{7l^ql}VvHp5?=ojeWK-Q2wcz_A~{6?^!aRdgXH5dIi67SmqF8@kO;8*}eF z`YD7|t_;Q}I|my2^}Osi<8kEgN#92_QLW8Q_~wOD#5iPCR}BA9WRg3}50I!2l6mad zVObAzqgrxCFb9a}A>c9cWZjm)f*^L+z#pZY9a zZx<`ZD;1&=M~i2fj@XOi&b zEOycKILX6bfBjT2Ns;cwMG4;IklLVkvu$i5&X~+%P=3T8yt zkHa^vXXo4;6Z|xOSU2Fs@FD0pKhdKxzn~Qi{snR$wu~~uih9rSnphabXM*sG&AkPG zM${J@i4%m_kxeTmbFZtw;PK7zB_^M%@`=rwtb^H2d?SWcw3a0T*E(j$ZUv{xg7WPoYb7h-XA#q%yyn-rldE zMo54ju{L2UZ~+-c04GjFt=;_o)LF;REz`$lALtO__$Bd0NDlE_+i7~*qwHUJUjfy| z6aOHecUmUv#LJEmTRbmGM*U}A@aa!CdLZ15G>chA6@Iy|IX|w8bWWG&7!|^!E31q)N|DIqi)&gfK$K$0vy2E|)*dYfYTs zX)B^r%6R*$`O_L6Uc$Ns1j?lM4gJmXXZx*}e&5e7h6R%5($mvPU_uB7WNj(S*46j};`S{VT@02WrEntNp 
zKK{zT55pWN9!N#qV=aT(rAPREMVOy83+b4T(?pBe#W-(fQ6yp?{UVrr(x4vP{dad|!MUg%A)?7sAa7vBDHd~xL5mgZY? zy4>+ejkO7zsDr2b9w_(+D*uifL_Zo$ubg6N_2N zofSu=Spu`AF7baMR*C{ZsYnvQ``7_5uE%r-Y23Z(03`aqqQyW>GMy7a`1mhA@awER zYMH3RND%cc7Up292Zzn z7Am&WUAB_3u>F?E>IXnx7o9u5D5Z$RInc0ryYF>zGr)O7%ziM?4niKy%!gwQS*xW0^tfeX?g3^T%~!5T!N=8C3A}3zf&7} zubOoJGM-!!(F@x6q)O|M)mD-#^Is z%7O6Oc?T9+n)hIy+e~&mq@=Ozm^3ad{%C__JdKxt<(1V_IJsbPD{k&eIyP8!Rk7V{ zdr$hieGElpR%X_&tl;RLF|#06TV+ctL^Hb7^+$U;*yTJr5AFl_!y623fV_wnS_8n~ z8pXS}A^_2o!v7RfFoCA=d)QN3k6HB>aJ|dBW_?h%S)j5aEhl?Ey_96pmQx$?!pmm3h!4$l&lq{ zVe-dB=}X6v6#&7;7>m=6<$x`!F8J(#TuG z+gT?{I8RtCjgwZj>2u*8iptEmY;D~>uoh-j*(P?z# zMvKrcGCCVxqE2StE&sn^(@Zt~k6WyLbUcDQxk#7g1xR;KmhKJi0`at=f^}L)z7{rV z`2pN=aekXN2ywK*CY`2Q*Q;@Oy|(q{BUS%V+rbCOAO$NlTXMic3k6NCFh8FwWNP|j zaS;)p8z1!CX!Yk8`VLiUD0VAW8673%;TWs*w}E&;Bup~QED^)!*Qpw0-|O}Do-f~~ zgudoGpHFG(JH9Gd<;#GxMDO;&GO<&SzS?!LUkC^KY^? zwtr=9YOpkwm05ZbOU@ZJN?u0=Nf0MEpOWz%k6NF^b*2juk?~;K=MB|gNnfWfE>)(0 zwGQLZE1zq)=ZeJrT7^-;UahCilx^??WVzRev6zF*j2d5Q2ka}M(aG_o%2sZIg7k&v zSvNEW$CIG|7biufAqx^rk$X#c5n$&(5WSSOv9^X(xG(AuB9`t9*FncOKHh1nAh3jd zzZ=KQKge5t50-Nj5cO?fkqPH?#IHr8#zmL!-+Yo4SN`jhELky#!0XkKDEUPwxY?s0 z^V`%Lb_IW06JH}@S~@4tn&q>%C26(PXJ@m8l&Z=F=P8h^pBkEs0o+_`4uv%X25Vnr z;$e{(H9Rabz@)(aan_~6QHB-c%WH3#*CTy8Rd;)@$L+kbI*CQsa{ZJYU?$x5v-Lkd zDyD}iOP!9WBscQO(r5B-N(FE-Fd$d(xa`kqK7Wpb@G2-jE+OOSIy6oky#=vY26%64 zTNZ-i5E9_Sd{hiz)vn?oE#6I35&L(rLY5L&prZaKp!J%9``YcvoquB9_3r- zvQH)wwhK@)4Q{59!PI8}FKs)FPfYZl^u<0bAJ;4`_L%hJ_w_i228+)`&b<%wO-`AT z6#&?9B5fc1N^kF{#%ob9k0$Bm4N?hV!8XzP2_c-e`nJh9XjCd>ZGWTj^b zO1-+A`Go7yw5xm8aavKDfAZAgmRbHxFg#*lTZ&K$4g2*-&!%+jqpEQi1?p!`IaT#l z-=O+0b_2ivn*LOP!N9oviCXG;=zX(HSq7`mTP&{*fU_U3?oPdgQ%L|cbAki%auP0H z+Qr46mo}QPrzq9P)3(qme-L=ZI9vqEDpC~$$aY7H(hk)6H=ixEeEOam(&4uQQ+rDD zutmBbGs)s{MWYcmJCKLPEb&sunh6u%JRcbA3cQ-#>U*9K=YDwm@>m8=3+5?rBRnZP zr0KM0(7rP++I{jPavyUP>A*ZVn9=C1%||CMtRgvhvl~xIdD$q@7MjjmgclRiEX)I~ zp6rg6p|S-;ZfUAIR0%VT&yUo3@^)*miz@E%P{pms&xFJnEc*67mU{Tv83oE&J zZ}IFlU&d(kS)$h*!+4<2Z4``kEaYnyl>y3*|1XiGAuzBcwgxesLA^ 
zY?P|%GQJ|jROGkROYJE~^Kx~lR?5A6cT@ zLS6`X50uQ#eG^lyxB@}*)A|< z6It~{F8D`Yr_z_KkRXazT-&@hm{}dCfDWfumQMTFsgl}jBA$|>t@l%ski-*_o-~rf z5jB`7KOePTi-GY_@Vx-u(W|J7`|>nC0~ZnEaNudimm2GsiQ>DJw>2$%BszAJ&PNnD z{}ggGvjY^dwc)UHY9Hy(Z+uF894P|`q(7v8(w!Uu z(3G^)K>YZ6i{FzySwxQ?G-^AWE&kWoKHcW5BViuC!{Ybd2MYx{%62Bt)q?Fa2`6qX@IG7ye8V z4KzL}iKI3(zWz*nKhyi-&DO-6WOpEV2rZN;xUvMK2aXMXwD0ucG`q^q$y(NtfKV5L z6dZVs9smYvl+)sXgnd@b6u_MeD#zC1v6(hfCm?my!Xop#3R$S3JY+>d4)T8aDc|4o z$CZ=MYxnlqZc86GvWfZ5TotZ00K%Wjfn-KI+m( z|8I6?b${*5xK12wt+lLBRhMC*9}8?!W1YjjU0g6ZFyG%tl{k>lG8p+7JNjH7ag!acN!oO8aLdWq&`Y<^p4incba3<1S@tV{u$q)5P(rSVMWB+NxywB8Pr zTtr9rQgl=XRKvq%J+7-G4+=t!rl*S>Nn&s0l;xnuuc8*W)){tOF0--92?M)|KrOV< zK8w)!qI1-nX8WbxpUjzw3x72pZQ#aE#B6qXbu}KMNAflB-}F!^bpJ9Q85?g4q&gJe zk8)evpV>EFPq7?FCrN)rn7X>f*-8F@nq_Y%phxXcc8S+#G#(bvUTXC)GSOj`m`XAU zIXKO>cyyd{UH6o}Eg~X-oy)Rgs}iJU=nVkCSVOwdO8(tfzn3=e95(F%IHtS)ZJoi)x-vRmQ4>cjhE6peVG$iX0 zbeWc3&$fo0E_{U^sctgRE+R(Z|56((r@L^f|5JFwKh;JznYL;& ze=Vz$un1lHi>x58U+28W^cGSyq8}&T9ADz9UL#^;G+C5!b3fbel~PdNJ>3MaJjFjN(&_>HB^bS`3@k11y|7tVu zrdyn;WxXfw-x;F2eXq=v_$!e$glmFXHq2X+{&2u#_&f3=-mA)DuIk+J%+N59V0>k{ zL&4`{dx2PP18SaW{j*qtEex|&86eG~=lg{q!Zx|&5~Q2sv{H!{H zk&=T z_F6n0Y>t)V)Z};hL}k|h3@a-vZ2#g2B#|=Tj5TTZP+aCBYG0EKC7G%4SB?U`VLM(y zvC=fj&UcbN9G7%Pf&CyP%Hwy+s~NhPHJpIi{ZrQWf70C0&yLvM{1uQWJjaS>k`hB; z2^E7tO*U2`P_JK5^QEy7hrREY^DFeqAV>Jgf{JilTpX^*bfZL9PFoPtD}e_O0*?q- zP<;UO)W^^#m_o2Hz7Lo`A5->Q9#b(s26+uBA9bI1U38y?M05D&jIni-^>oz86E?!~ zOFz+!kN~;Z7nTz}S_W2;>Thg~u=*~#DmL1>+~KUXif`%Tqc$ouqzqq+_+Cb@mI3xl zfwd3Fs6E`3J^)qOMxSGJs}Rp)M}EA(LbO<6p4n8OC0lf}@$;?f?!tiZCj@2^*3_<5 zCiu|x(5Cp%jhv&<;JqGlqGXBD)eiX3Mc4FcQXjJN{`nn=$nNkl`%?hbMj5|me4_4> zIZl>#W|DgBQfNS~tV>N42$}e%2f`fu(sZ42SPGiYAV<*!@h0w6q0F)cgg`FWPL{9e zVMCnRlFJ$RgggyQ$np(D6+C zd7D~L=1wj4Eb%m^{>cmNJEG5^yx~J2Vk8IA26MGJni@&!Q+@f5jj{4xv)z_bwh2vy z!!@vNvB<>nc+c$DA%cYZ>FZ@n?n8O&hx6iF#UZvbQZEy3pTHA`9_Ay!D#(23ga`&c z%A#Gn4p)$6#yfmGrNS#J;VCFy{2kvQ_BOnkhLzkdwBzA4COL(#{QFM|fQ0Gdoc&<4 
zJL7i(pQCy}BFR*o9_iX38XJ;<^q%VL*QS1_v!SF59o&|E1&;*+P{0a8Rs1W@-?m7=WjO?OgHT3~zq z(r<44vDn=sQ%I)t*1{fAlY3r;j*lOO%E|SAxXV_5@I(6=V459E(VxFJSPe{rh#|cn z3KbRIKZ_SiecCz`6F+GrWkw-kDJWcKvHO2zL6bi?5npW1kpKuwoyf0Ty%0ZeGE>~g zEG$r7gJIJwpN336*ZC{|U`&0aT0y5%o9aE`V*l`pW+vzD8*w|kS7?gKu{3a02e>%M zG-RWtZimahB3Bmb&RjFdf<+rcx6mP9sHg$N3ajo+^k8wZMPeXz-EEyot7a3>ar};} zo7@Ul^?_k@Dh*}xLHNwt-%Y08TT8<{8lEMcedjDb@EFMW_2WJA&Sd3fSd4yUE&^i_ z6$EH$xse~?A8(OpYfmw=8Oq0k z3saGWFahvcy~29oaB3qnOT>6RoT&Vj&)SQ`I(a1}?w*-S&mn&-;r=UrFG<3~HQDh@ z)x5@{FJJ1-LX+~H2A=7g6U62_Wy}pvq=9GZ*R%O2YoIim(S9HX*AU1(9)-cR0XblR zrG+knWHYwGlh1`&-Ym&y$v^soG06tER^1l8#_h%I?CkD+s;Y7UeR7n@1x$3Wp2XIC z?)%Bl=R=f4vCi*2b0QRgws#}ep$E40FgoAqWRk%QJb^VN#bTB7coo2h)+0hXh#bA5 z#0)6F*GNzR4c9&$*>|+iifED?6WLK!BJ4YzBaj?hRuF=Ze<@n_kB1?`xUL+Zd@{30 zHdyy|06H-^$9LW!&p%gXEc05f=f!)yk@sRpDCsH+f?mhd1ELdG0X@$VfL^$m_U)%n zpMad=WglbJd+Usl_qiz+cycH79K5Xda~BvsBxDUja9`4m*0~$%$KE6&-?gd#v6mRo zPAT+N?N3e>dk1U2^7dZU%!`eU-P+n>Yjis_vai&rd&=G|9=y(f(}78PKh& z+8;hV`ME&KYxmS@3oAI8>pVt=qOZH#;%#jmqFk;GoAq(2VOIU|DC2&^!gY5HwW+D; z`gd(@?W1+pWFBL)gPAfXO)xni-;0h8S}@ld>gLND2g6^hMZS$|6QI1MDOFP0{0OIe zb0m*=%44FFAEj%7rfWBMvC^fIjBSQ6SLH49^K9Ptw3eXp!b;uCs~$za$#6yg#>2Ce zB)u2|-2I0Zy9i*O6foUDlhX!ET#UGX(D0&t;`!>O{`AB`-C# z)7H3UNyCjd@6tDs^q=<4JjFe0O* zBrIy!D(O-G^1Jg)jV12qj+8u7VnLT#X5ZScYK1I&V-;W?i@LqWv$seTB94+_90+`s zl)&imP=?(iC`6ZlM~Wg>sNZzob^Z+&^Ut0*`dSlZ<-yB2QP}p^>a)$M&*np~s;|~2 zTy_QWxtWmI+lUM&U9t zqg(LDzM6_KuJ@LDTh6bcCc?um(YetArMf7QEv!WIOz)dxs0Za^9-h-v!5g2OEB7k< z3en@@2G29$gGEu0{t7DYA6$PiO#*Ht>!zpl-Q1IxS@!4KE4LmTupI0lPnp?wfoKG3 z(IM^G)T?PD#y3s3N1U&!?YWYB_oL_!U@IzDOY5SlrVpg-&N7ARbn<@|zg)Q7-M72A zNE-=wHrhyhasniQ^i;dq;{FsKuoxL~Z=t4aKM6*SFBeUsmmSctGEouX_u_Z5=}t(~x&dFf*<)##Hsb%y@wFc_g{;gyS~{Q?=(BuLhBN zJ@*=#d~ff2#eLsBZ8|K0Hh7#C_Y=^}z9Ax)EjsxZ1mc!1p@lul)gIMYiBc zxE(5HWVDQ{a^neR0f*@v=tSsZMv!H(xw#i=-}dR-x6%4nJKiX#^PQ)GikkX{XVSzi zFMy0$J?|oD)nDR!E^2CM zkh=*RiV4cHF2|{e`m@ztm4U$U?BS*)dB3QrrpxPr{rVXmR#DN%j_bx#K^P?N?BcQ4 zbNjbojtyLTmC=*k1z!l3pcKIxJ|%LKBnyMw#`B)OR|t~XYq)$)bbCdGS=T4_KEVzT 
z6P-6_W)G`U-wPnf1tVVVhMlDtude={MA>&;r14;ZFdz32nk&t3gYxA?l5^tICprkVXfvA_-`-sN2ct{HH^;Th3gjm;2&A`YzHwWTxLFKfKLG^m5>;H+sT@u@6#^En1cK2enWx2Gf$+xSfy-+Fj+~ug{axXpt}n-^1Ex z^*u>jY7>1E7m$B+L1IrP>XXiDCq0^Du_QF~q%)Z#9A9!MA~Ky8nl9|=(%LT9r||{( zqSNjQ^&#kn-{LDnKqou+^81p~+-}1ekOrqB77Kzg+tZq(a;uXqkW42LNRjlxox`|4 zr*p%7wg(b+$O^zC`t-AmcJ2CJBE5V>>Zr`^W=%p;KLK4G&d&C)d@L7?zqOurJ6>kI zao~;KOA|Yo7j^wvX~qQ{*(h3{$r9U=_vaUFdO3Q74n*O%RlCzFDIB&FJN!)-C-wt5 zUv;aOHv1$VR@uxn!UqQMllq0`A8oGOw)Wh1B)u9O8p7&7-I?-FK04pf2T53xEhIpo zj}DGl=TyyWTncA=7QIJY_r>AB5bu1e!Qv3-j#}n1y#{KTafx7zE+=X>ySF6jP3ver z5U5tDWB`~Yz_XE<`~7#2ogN;eQ2+M+)D-_?Ava^kiEnEs$G9VNwPf|=3GM%n~%0%6`BCvO$P-HG#off=mxIB2d z)BF}55tElT+Ie!m!jepMKJAWVM$c4okQTWl0McP1fX5?tgcude>+sglw2*cj)%#tjh> zD>cV=P?%flyzSWO^_gh0(3?~co$}2EjChOhzg^1m@;IA2V^?1C&uq}MSCr-vx@}cB zo4Xx`PF@hv!^xR1G{RRe9P-ljNuAXohSpOhS;&v(i2Bd$?#-Pzy&msF)2$kgR8u{Y zLt~;i*&f>D#C#BoIfg0>@Q;*sYPXnrsvc{p7*au(@IkDFCl;*^o}1H^BpFCgHx;e| zr_Su^jKQ=^1^V&P(IINQ6ivw`SX~+daf;<_uq3h3x6DqQxf}H-r-CgzPG6A&V?B0J zhW&FTmm~&p<)OKY4X{zhrSE;zP`1X{Z&sw8aB>^%p}4MByZfy5ux~`NZo5wTjPLJ; zF$uK8$@xwse=tQ*BQXVOCSig?Pcs1g$^p)J1>El`-juK4{O@6*ns@;5z3`K`!X5cX@o> zJvn?R_6RiNt?GpV@KAIZ3j!tPre^c*?8hV%yxtD=RU6s|{eWKRQq4@AivBcOJS*{b4vf8W8)5#tftXY`7QL|G??UH*sVQa1a;Wt^Nv%dNy@ zWCMUX&b}HSN;zGWsDS<4A+JnFQx!_qg@TxojvsGLTV)CL;Wq}-0O)UKZj{6^2Hcgp zz1paCtQI6q(x5lKhaIvP%37G@LdR#kbBg=y(au1v)zgKkGn5!7^C3SFS4@8Qblwxj zqc$nauq~gxd+>o@)+H|%^Fz$L3y>!)MGjAq*_oc>jT_W<^w2XriMzjZdKk1j8#;J&S0>>UXJ|8|p z`nDCc(r1thZ8P(s%oW=ZO+T?w%gwGxGX#p!$DLW;rw>Y00spfwkj5(4m0S1{JhrW+ z?^V|N?Cheka+H0oef(7dB*A;gi_fG*NCUE!XgXfNf5AEV#aI$VuZd!#vlGM!Wm68( zNR#S+{hX(#(|C1IaNkfGv%HUwNyF9X7gs(x4cYu|Y5xM7^WgoEVnytxS+}*4)JKPh z2e^a+&V;Xy-Q7jeEnR8JtT?yW>7-%5`eFj5M46MZy5v_eph(l zy@X88gDoDMA@pMOz)XH8d>=Bev^K1i5@ueYE|RHMS-{fnaUNS|ZrSJ)8@-R27pJw6 zann<}l;ylLURzaF{6Jnn1BIs2Ytf*xU_9tV3tkS%Rb%2In}45Hk;}xFDJ>8SyZNBb zP%d~Ro{mlg4vdG9yb6g-rVu=8BRZ8#V#4wkFG2LRyhS28 zB&44aX*_wPxsQVsbYPEc!^D#BT`&aFleSujY<*~Nr@`Hn7eFImmJTE5v2oMl=ZeLg zKS8g*xD?&6t}sq! 
zNGv7=-|#!+CGPhsLiJHd6oSPrslFyxZ-D@f({#)E_>{6Oh@D->`;>SchsKA&Hr_>p zpZ900Ott43IhN=zEua~R#XM>+Wx@=05MMfGl@8Arxs55Z!YgzN(?$!2`rm~<8Fw}E zw!@Mfq+5pjDZPkH!yVrhj@kR3UCz~=5o2uxlY1l|Uecv8;RZe8S?-GgC->+C19X{{ z%XXparAUp>kUYs(#&C~nS!H|z{hWT8#23|VE_|y;+k5p$HZHj|U!qL!oAVRzuZUp{AYrLp9jGA13SKHxZS?61yRj;fCJei-3wzO^WHeHjc5vLxH8T&R}A{}Qd z$}loP+0eK~5zs0?aOcSJFo-!nc?9;I_Xng0#Wj+b4WjGVSo2zF$y27xKEa1|Z<8>~ z4!M?^hj1vF-0i87^2Z*T1cvz`>tVpv*4FM$yXI@(0teh|Al3h9H|y4sF>ZBCdS|j0 z%{5-vt~Mbs>)isFWT&Vr+WBCNt&%jZM+o%OcgUeZ?lJT|*8dz!* zc_VgKIds7r!6SO5Cnrma8qU_BJR)y>=t8Iz9@O?L6#(baIhO%QgERZ}f|jEuLAxt) zZC{kk2gf~&iCM{HTg&Tnm%mmon6Bpr1fjSWFDhxO|D?DdBr)%_505@UB5cM)g${r2 z#*2ce5W4`(!hxYta*W7$t1o-=o2sg+B~8Xy;WoX`#(w!wlNiB!OW3gG)8mpm*s0<< zxOGTM{^#2k!__u(R*tT73Ic#eJzEN<_)tQ^g|IM?p`Zzrr;y9?*hg9D0&ljAxbC`` zVK|Yqt1yt>P;}viwrm9;$`tp9h-cR|f@sNR`%?H*c1j^d{0wr1MP2+%Py_$0b7tQL zyz1hjv4Zj&!3MR)Q)!v3WuQpKcHDS*cC(z+G*x9=7Yf4cmwrD5iiz&Wr_488-t3q+ znb?u0tC|mHwpn9p>Nj{WEBT#VuBVNB6gHGYQs_(^mH(Dmd9LL!@Z86p0BE3u;**o+ zdm&+3hY|kYQ_IS*ZjK)QdXT7pvsKM?R`E-Zoe5gnmoK6ItDgav+{xJ;#roWQsjMFf zIgBs_ll0@6gWu2=xAl{!*v4YKfuCw>+D+brJ#Po{IT(Jpf~K! 
zRIF()v@KZ7D+%-DS3^zSc<{-Pm5gfkn<<)APD*Gh$Qz*~u2BP=s~5D*pD~Qh!^_)W zG(2N`pH58TE~CW)O;inlm?=FrH}a!rggYQ`hOj;{BMS3V@Ovs{*f%)H^Zv7SHufEq z23|ERD8q20p5|5r(lz=?L^dcX00}jlc;GddGlw&p$Mru4bN-LvssEoi(JUW=qod=u zG#KNPC!yUrU%rHwgY@)-BhL}<#&vf?yDj8ov*HmK4qo2sf`ZPhyyT zP63(zQ(Hei|5rzoGY2_3(@1VKzUpwB^(Vhx=}dWf69N7RmjI92c=by`PT-+<5AYi9 z90zV3StTWmRH=5N)OtKdjpXRLI#(Izrj`ctU#mS~1gv%u?wYB0F2+AzI%sddd|?uD z0lmW;Ih4eFC%GCEravy(17)#Zz;U&^qM~BmTs&6!Nc#_b|Id9FzoEF%kfpmPD=uE0 zA0;FtXj}oI)#MR@*0+~BI=xT;D=j&oZ|xE@ohKD}Zqn=5I8gGjPcD8ZT{iy6j@fz# z*#52&kb_2M)NqhARW(RkdsiL!`#kHcvb8CWUBYDWOc6v!r@))X$HRL-*#ZrC^ypFa zy=4@*f9`#E42KnL3-wWP1^(B|2xG`zM+3a*eHRzoCFcFfF@WesW=+gj)6}eX-JcJ| zrN_Y(=#MsXxM43m+CPSlsXc%GZVwDUt(&e-b#0J&52FD7P-f5)5ZBEIW&~c5lnyxD z4?w{M^U-y^uuH$pO}`sEf_?)Vt~g)9DKRUn$zkc~w2Th>bkRtjjQ!NNKrAA<0U+g- zWg#`PRMgJAMU!efK1TxgleWjR`;R_L-j&F!+IWz%@E#i;XQS_xMa zV;LM{fbxd+KJYOq<(LDKWB*Z2zc17l8R+A`qi&%pulFS~OH77_g$a7W4$}FZHXWp8 zWJt8>n4sCWHZxTbEVOT>-|#!FqhAm0H(u_6{C(*@JDcnXq)(NzL~NDyYaMA%uWaYO zClq-m2gWu&CNmcJxSIOJBidtgU~i#Gl$||>i%F~0?}p)e$+}Ha^A4rD1FHP|Kt%di z$lyV(A((@n%blH_orKpeA1{xYk%eVlQejk1UOxAw#3*~JqPsgk=g}}zD(G5~V*D-E z6It~motO$&65CXDUKJej`&{ucKVfDXpwi?*)QjmeRA~cXjCR3UOXgJa=g*)0O{@W& z^pA!rx%i!aQCj%~#7S7;$2wIcKt>_}54;x{hg}MJzfM zA;olsg!hAep|i`u68b+U!N~XrO`q9=#${)H=-yyDe_Z#}y3;C{b1qyByUHX;^74gm9@gz13JEVhFDUy7|Tml=99Ml$Ci-Nu?g<=AiS&n5Bu$z8N+T&u=4$ zJD?>D?%2S&(nR56=2l`RKy7Rah^S=#(`e!urh~}l#a9GL=QUevD9wX zxJpv~`9$#ZqlL1$q3Ua_FdM(jICn@6io_p~72qo1%O0hpI$3PB3b#}QO$z&VM(oBs zOf2=Oo@;o%#0)`X){BMJlj2GAv)OG9p7$fqVc8%T=)^F3(=ytFEc$T4 z&oRzyPHD1Fhr=vL!D-bUmlOhM$y!x?*rfXns|0l9V%xp?mV&swimU$mh%QzDPnvT3 z1rdQPGq_HN#1xEs7+uF~5&6buMkedht^T#H$w+=|Afm*(K;LsS1}*AT_VJC9V7;?|Y{A0)!?Ta}jlll|{aAcMOG07#|vcek!g&++Q3{E zN@;Np&GDu`8rmP9ym0q5N5yknt`WOPE^^IkUyS342&OKnORKZTMvjh|qMIQodN+p}2Bf=m0^TE(rnv;~nR(mC8y^gSe&sxzKIP?tA~X?m5`0Db#PKp8)#-tMCi;?8RdU(LZkX9WirmWzi9>{SGZBH@~ou zvxg*--OGP5>h+8Mx9Leq<~Ebfh6&KccWTkElRrheg;QYaixGYZeB55k72huLnc&O& zK&o~Y+};O;<>Gy^j6HChHhFNANF#iyJgH;+U=|;USMI+WfF>Mlb@3xEIgP2~`}wD5 
z3AAbt%{OUN4>j&1h@kzVjS;cMen;mo{Bjl&U8a-X4VA>f+<1g(YGQ(r7*MK&l-vwy zefJKoU~S2{(`n>B=CpV5S_T`!^*(U@h=>Hw4X2|sFi36R}@Jk zNhEWB4{QEv9DjfRf4c^1M3Ap;*^XvqP)+SjoSYp^4Q=nf*&A7*a_~dgAa`GcgdnWy z?hd99RxK4HD^p`Ilv8_S4_5uIj?IdYxY;R%;5fMTCv&?SycZb>U-@A9OHT$N> z97QEW%&^(-K0G2iA)CS9U;eIyzgxoJR`9nK{A~q)TfyH}@V6EGZ3TZ@!QWQ!w-x+t z1%F$?-&XLq75r@le_O%dR`9nK{A~q)TfyH}@c%FDHqcN7RGFJP$*|NOzh0WSMD6Lxpue0+SUcbCY;!T0-p z_nm`}AHvDO^ZS1`2sa=5Ki~iH>u$S$tl{KhgWSE@z>)YkIl&rG)!cU_^R-`2N_Mn~xX53%20l1?S@7gYd9%qVlqFgDw86f81PL5N=*x z2p2Cn?%lCDxWM_rp5UC^{Jap(-+OX%bAoSN5I%kmR35P8ZVzt$yHarh{*Q|bY{z|f zCQv?hPS7m)A>5!=x!M1BXZ>B0-*xq0mFeGJ_J93I9Nho8~4Q@xqLjpcv`^k^fu|;o)H8zq{Q3%J%Wd4z8}QXJQVn?{xhZS4H>hnv`75W_*9pbHAuwhmI9qLPfkUW<9r`$1;3kLrt3` zdfUEhlR>1N>{oveL)A$_#ypxjFC?Q9#V;^4A>l!2?DHS3Q^dy<{<85~5E*(jLEZTU ztK3M4gn8+ze-5XX?vL3A5cAFZ25WGorH9`f55AScX}$HGxzrsQj3I!S8`{_*e?$D} z*D&oL{a9*uN0^kkzdytMe$y?SmYbX3Jqdch9OmYZiTRH1>DC@n6F7%*rwTYi_j{5| z=^5!QnYQYLG$480@GE_t!NR_ zRe}eNydOe0{wnfdeviKRP36;Nmq@ehrsv2A-^xniE&4MucHL7_lv*STd@Q(2iC!+(3@^3>=)oFY?r#(ilBCz>wJdXr+2$E5!~rhB;2P|?!#obtRt zK{1kU1cYz*{Cux{ZpR*MOtcS9Os_noX^{^cMZ{0Z{g zVpx^{g}5&_-n{{R-EGmX_7^wv7`JL(&j*7h@H<4!yEt*YoCGS+Ci(YYCM9s1r_&E0UT=TYo0oY0IJb)(~O@6EQ~a3Fo-K9Bl@ zSI%9##%;T^M(1>-0L^;#pd1u?+=v^n=rlc~cgtL&KtSXMJ8RRNc*zrUbWb8WKKw0J!jtzmuYpznm21Of7&F| zlDLCGCSElGQQKH}dT`Yg=t}`>`e4Ajc*2255zr*HSboHs5;&KYU+x!YV!yk5tQ$2eqY_*k=`z? 
zE<}wkD9LpY>1_u^(ltHskh;!=IQue9!x_E31Lb-DS6^sOz*` z_Voh4uDpj}BZeo{uVvJJCR!2EabMR&R5lcJ_e)HAyX^Px>$(;*RAkWe4zwzst~;ov zMB?PzsXuvv{Yr>;B)Mb>^I2V^TE?5s$^@4wnb@P2%(+tQbN}Vl@F(bO8)?zwp#du> z6>WCvS4yxj=9|sndsNV;K`4zI)FT0io!i~Nb| zKg!XFUm=Hl(>1#l7;iLshFaNFzEfydt_fR?+6jKPkav>K_~hb&u(rW6W?T@caLeFK z71ett1s{z#oZHS;@hIo>&g7dBxS9_}KYO3qaNz~lSyUT8pnWhnCV*7PIfXXN^#jR= zpr@+ynTB~Uh`7WT)+5y3e0NaU`q?#TQZ6q2rK`68BaWnJxxG!PhwQ2H(d$DZmut~3 zJ6W`kO{&m78h+$Zn41r*`XE~K=Eo390n6w45-j>s>h^M!C2JdIkrkTOlqS1$EE6V5 z^0{K{GCn54L~;B5w1CW0#A%;+C_UdNJJ%lL`5zlYu}}jam`kdb)8Rp)tou@7im*71 zPv}T$xJweV!TUu#D1E3aURk|tMxPCTztH`G)ln05#N*lz8O4R4I8vF8(S0A2)(_5f z@LD~!8#j*V;&ZKgQkaqtAL(i`qoay08H#`^n7+OJ{Yz%x2l+bOagiMIEHxf4S*qgn zFQ_5W+rn*#MlK>5Q?n$72)N-j5$~KS)B8 zL#4n{P^T>pDEgWDA$}6c|B3g2_C*x$EWv`h9WPo6d=PGO<0Be!{`2Vew-@pFAq&}exAv{*w^c? z?ykr5UV8o+-CA62N-3N+7&%8^;&HP2!kgUz;?XFlM0)f|_Q?H;cw9ow0ldS=6B4}t z2Qfg-zjfT@gw6Lfg+{1`*s>E> z2!lIu`A_8zH6oCZZe)Sz-{Upt_vUys$yqvf%+Hh`#5fc+1Ec(y_UJm?C|B>^?tLxI zM_$svRmHU|uDEM;t6{};Gxo(#M;Szlge22}PJ{0)T=(Fv;59$3!=FK88htz!?&&(H zJNM!CADc(azvY5rS&lm~`qVU|G14u7d(z%LTBJDj0~Ja@_vkQFY^u-1Mlmsw@|z+9 zDV{48ce;MR(9Vl;5@|{PPU>BS3q9Oki*f8yMwo$7Z`N#sYlw6`jyYPc-6pMd5F1B&Z)mj(rhG zN{X0?{WOj3jeVH#giTk17AHyz3vxqv&WJ*V>I|)KQ3f}YgUoc zw|5D@5eF)l#?;|!>62cj^DiA#Ko~ipq$cS5hntUEx>(JOGpq`$(4wzKyU`6nt1lgS6FdRVpAHQFRSP3A)?L+u2n?tbIIY+!bQ+xR3_G$ zUS*VvcR{{PQo@#9E#8MiK59zo;)?;bpl+btq5SJ~a4z%EsBhh`8?7pM0OH4b4P>Xr z{Sl|L-ewVTblDZOm9|cL6G_&BKU*MZ;_fH>#iXZc%OcK{n$8@iG zMCS-b+0JY?0}WCKaj1&K4}*I%kmGjWbip9^Ixv+f*|M<$n>m-x?ns-43r4^u1vGX4 zMp1hDu9(Tash^diD zms_BJgI&$a4uX?rYB!eW0Va$Co8MBc^eKAE*7HQOGK%P+TloOfuSb@SSJ|1A^m{=fTVIhR#aoP{J=(l-r*9&ZTc2!%~pba-zol zkLV}R?UV9pJ}YC7rpGya5F=pW-uEI!BbN-^%S>~1)W1MK`B93Uxc*b8ZyCS55w~!! 
z=Xs(2LWgiSF~`j$3*NF;#h%DG^B^Vzqj`5TVWyojz$g{C?jCk3zEudK?#-FE7MoV=0l!S$#xto&Bc z8VN)+$)xWC@-7y+3hXw6G-jci%8eDHsiHw-U2vkeqT8+fPctINV)U+4qKq>Fo+`4Z zn_T#OL+GtD<@U7Z*^LKpCWDtmA_H-zDw8reNG>IX3FN3^wH`rKM1@uYty_jqkSklLg533ev9ZLWw-?jx<0*dVy z&%KPeF)Wc;1#N@p>JPLRc2%cNbG#}yM;N0%Jh@~Fs7-K4AwG2vPv1YlS)}FRkAI9& z2|Uacefg$*z5QY$Eo(%*;_*X2k9=`@@O=L|Y~MZ``IYss>SBZP#oMx@;ytIhEmw>& zPZ1$+FVTr?NtIH^sS$^(YPgLw^v8w;L4{sU7es7dxzC7253=P3mw*b<;{y7Q5ytX{ zcyk{?J8m&zRl8~lqBLkxx351;9QNNwe^8TmjmsekNoi(5vW1XPd7G5Zq0uTej;38! zHGDg*hk24>%;5is9EDX0Uz$}jAFqW`MrnPEPqaYCR5Mb{RiE9R%e~!*vpZVS7%d0D z*pfF!CW14c%=xkV=?2{#LzPqRm=XdjOW%S^^*NiBdc}aN*qi3-ZPoGf)ME-M=M466 zJ>fSAXGe9QV+r;GgpJOxZaDwKL+M%n8Wr12n9y^w-Li_H(>U2Bh7WiO@KqX^V|JG| z-EiEbdn&AHmJq(TMatUutp;_Bzj_mB36<#j*vZ}#A5jY)1h_vrm8o!c`+9aiO(wx!f>Q4@j(WF!a7vD-8@396=E6Ti$=I^Q?uPHm(qeF4cuvM0hxMe^Tqw zJr!{o;4E0)+k;1LZp~4z-DCA~V%mo_>0XvXDM+O|{1z5Jr@(V+{lRC=KvHUq`|+n2!{1?hNONTh!i8i4Ck^3q_natsZ19@5` z)2mV-wzmUL0j+D*>Z+pCx_BOar}pf0SMwEhVBNmhK~$PZs`OR&pjqfs+k^3z>%7QS zbBGx?JS$@~qt}~X5lR3Q<{3%(uY)3u$1=RHJoE%>Ff#eptWULVJ{}^WmrPQ&Q3g$~ zd=dMJF}&N79Ut}VnWQvDDJ^Fj3U!UT+hrxMfw}SM%Qhg-m}9Uy*W6cs#lF(V(d7J z|M+$v;{1>a&GJ{hL9Q0i`WnMN{FQfYq;|=k^Dl1L&$L6B8`g@pl#p+c`5?O3l3+!! 
z-?ddRBE^Z*#ALATp+FZ$BX?&wjAnl_?E45~rVt(GP*MKl1oxNs58giG!WvlaTAn3y z&O}w_n}D9_#WKtnt{qqDn{F=a{?EfiVfCeJ6+%bPogO zndh+mO(~`|HHUjPzN5*wwzCY0rw1j=) zvD9BQ!m`rer9eOMw}xpRB-d|*$;0R2|E|jgmDg0yI#&u2Xsil7^!BXiKtn@lGHC7kLgV#A zW1)xo=Zh>L0aG+xnJH(?Gc0?1Iv;bR4vS%p_4%aS zU&UG;0=IL_kA{d#TyI{B8IwYL4Ga7=bHw>@uQv~i`NWSfTQljm!&=L`M;OfWmT>ad z7R*}EYY}YX8D-V_?meOt9yv}fmqOm2gTph#mx`^E>zNJK}#X3x{ZWLhJcePiz)IO0-`48N%toP5T_D7TK?c zmcYDyaGk+ZYg`z+SX-6||Eq+#q$rXnOz=17q!^R-K!Z*j+lM!F|i8bH>{3;O-27Od?R040W zdj|x8U42s#e5e8i%>t(4sQ5)4kq?QtAMK2Lzr!(676v@79*jcO@O;$gXPKd(%b)0h zGmxMZ5Gj7H?{!B)k&jw{U5fBjueh_>|EMt<<)ZzH9)_rHWG@-zU@Z*{5s2$j1$fhl z!05@=+Kv8rvQzDbkioBKK{pCtTg(XkkV;9YarP+()w4?{%L(e_fZl^z(kl|@$cIzv z4Nr!fK<5%XoTK;U?SP7)jxYb<0CLzBh(VeDe*Z~~g$)gAwB%&cns@OXQOZU&quvS} zgFUYSeITp8?ZHeJOocbC(8zAX*em&A*F#23dZ<{Tj8ckbHTNQIHQmJioA_?Vwwv!5 zG8@w}ucnC`u5rul#Q6P-{l(j@YB=ZIaE$WBnpjq&tRa#Pq)&p~2Xr#==G)iPpu;YO zDvE6M#G3Zbkp~>zwO^NSc;>gRd?H^T%V+K9xn_bi^c`vU&I~U^CoQMOPi7EGShmZ#(Bi;bp zwq|QLxg+p7Ha<#c0Oa7lE=HYR%RJu@K@A`CVMDF(uew*w3#H&-nXKu)(2FCdqNRCV zeS7>F|1eb$T}Xo4aLNhgLQpa%-CiFc2xZ26a&3=%v9jkHeoIbMPK1l${*|2b8;=Qu zFDBrNep=SMs*>rB)DwFhA;`(ivlQ>Bl4eHpF)JywRP+lhI`J0Mn_*2W0dBML{igSs zQqWWEAe+O9HLt#sr{jSVNKad@4Xf>}F;iU?U6}NTnqFg?`&2rNu%|sp+p)>z{go4X z>aZbgEWhS5sTVI;n@OE=?iNfMQt&*6Qr;=rh)*NQzYO-H@>Aoz&uEg*is;4) ziXJ)Y%$vLx5traKY>^B?vj}@HE#2M_Hz$7>Ca=07n8yBN&WEqi6Ary~2!1eWI*Hpo zhVO}(I>itoo#R=7gT`c<=df<>fF9Z{J(f`4S0#J3kM)W)g~U2 z>RmAb?2qrXjp1r_h~GU~*%e(pEKtqJDon%=C6>E}T1Of+Ot*K{sunBQ6bU3FMOKIZ#JjUGT^*)WF1|2>NTUdz1c7*r&`DgOlh<{`P zVH0igPP(}-v}JW3`k~F0pm&hyJK6IOlmlqNtV>hYL4w&`=zETW5cxsWb;v+4F#cKtI5mAaOkw*=E$(` z`W$@R>7`>MKIwEG)V8tv*@mR?;h5w#AmMzW$LY2sknqj&A~wKDwMP#6+J%37M$!8a zi&46tc@=wO$G3$0`2466A@Gh>OO}|Z?YvIWT+SF*POdvHokpXQoD;1f=(MRlx_GTQnxGU$orY(aEnH0>;?y&CM(C}9#_2qw>A_*B zwp9G!1v6|l#hfRX38J_}_))<`S4*GRSqR9qd~s;Cu{=eCHf+P7u9t!z$~I7rF9d;r zyxVZ<@Yl|}C>BNx^8r{!fiL-AIWLpc0Qm^s}_!_k=5LD0_yD47L8CC z2zki2&Nr!Dd3cb<$L%t7?Dy3+bC)j24!Yq(FgIh-KOC{Ss0o(fgGPj`i_}^jPGkOl 
z+GPXZks4s~w!p&RY3R*VabcMoTzcC|mG5cFb9MIgxJIzQ`$WfMZUEV2YlnCk_o)0X z0mHc;#VTyrPRQ*nQ%KUKHrf2m*fL_G`RzPuBhg`%*tyIqPE6Y;>XXa@4&lY=tWjl1 zJ4S|)SiapU=vS2Rioo7Esd4dh`A8i)Nkb3HMd9hY7EB_XDm#T)8WcRYbq@w1I(fxZ z7C6UC$Cd%tAy9~BmSO%60}1%`vSBQi&3XJm8*z5wjo3GR^ZL1Ga=L2=z>#VoXFeh-TVizT3WJJ2V~3F*mIrKus7sR(bInC4n% z;+y}P96}GCn2~{y7j#WunklHvz+_}SCt05KM$n27kK1FM$5n^XWp(-!hW7+l9JPw1 zUd(R&o5KxBlSZqzTG_x1qdMkx-u;{u53;SP3Zif*c2dg{V#)Sj!qQ1c_#xRp8nv02 z97E~l=U}i=e?cG}%+SSXf04o>=z1fOG{1~gpxS$s=}+sdW=QUQ%9go#0SQ?;Hg|E1 z6U*8EQnutL^tGEBxPXTHWb3LH0wOrhbyEj9#NIAp}}`ZkLp<#h~kcQnngMBCc6 zlyw~C0%5)~B%7-fTmc9|1Nf~Xj&Z8q!HzfW>94>}%k3B}OOPd*vuQ+Eo+aTSo|HW3Sx(+$ALqK|eZE`u9YtgF;V2KSOcN<}2 z5%VnY?C~-JFuqY?wrrS^^y~W!>6G1Lzcv;=)8TWTk32S>t(``-1}%akTO)$=vdZS{ zW{4ele@N9~Z%|!aPkET#v9D)4Ker)tmSOs!mWZn4$-QZ(@x!vdO4zHqH$xeIaIh>)?iDGM~+{xO6P3gsejXf811jo3&P3 zZKzyG*;#rixd^Z|(AJo2V1b6_CMn~nt+3WYq66y(pfJ;#mDpJW1D?1fHMRMYA+kB+FtNEFMYCKE0k)L;_5s+&( zYm>9bDn~?WlT`tPYvqb;!NmQgqo`)6#o@-NJi}b2<6UABI?naA4H|5N7~^@t=YXZ= z{NS9=_RAakhTFe#DmK#ceRB81OGc#9B5=p*gdjRF6qbQszBTq8Q=6-NThej3RRkiY z{^)f1amDh52;jTe1_~ySVKy1e%Rt z$3@=llrPMrmdV@g;-q1K0cpM-#92_IpP6U;+642OPbPQCQAyZQFMA`i-uUP&^IYJt zv`Uy!7IcJ;cB0{%MHXsNxpKVOFi3vx81DCY%p9i;?_kmhCcZ<*;K>ckGvQg9ihgcx zktYj{*UQnyxEHxvXnmpL^5uG+pkkG&=o}jN8^VuR^9WMf5BE;VqGVQt)->IQuS=Nt zh61CE-OpY|D$48?Hr3CS#S21_B^O#t824D0Frezo&UZ^?BO{8aI~yTmr?!X-Z5Li+ z>EoC|1{hNfA3%KPqh{`;D6q*^0TlFls-oNg`^FSm(&EZa z%s;3J;e9qiFK1WO5yovAo_6XYBNEIml2VH36cijBAn166gd9Qbk`8-TkJx1v#L7=w zMnOV0>&omPiZg|Tj%&Ztos$fF9Ts_@@uQA!rG4{Lw}))R+rK6)w>$v5v0|gFa6_ndBxRpz*`{**k zK$F_B{q6+67qNlKg+r}Nm*n%WHfeHt^LzMs{IZD4m()+^1vmJviE5D7^z}hvs%A5| zZoZZaw)1JU9Bdn8b*aL~?<{wj;D&ebTD>^=*Slr0NT!ZT^A-i(&CWi#rrvSoEvR74 zv|G&T3n65?Xy?f3LYD|_DJu0MU7rJeu`>b-4y;uL+oY!ikDFI_Ywg7|*xYB8pu|-c z$W4V%_zw${uQmE{m6cPyD9xXIXoL;ZyXC07AZ0^0AjzGZe4vi9+a&tn9_y<_`E>GN z4d*Ue(gV}a8h6(XZbb#RzdJ_5lk}Je>r1xJL~(U09S>8gJ%lsu&;fzY; z^%^<~sJqkXlh1lx9du}pB9hV$b+~Cu={Mkrl9HBc<}Iz29EzHWgRS9XUQprb2qT(j 
z6;E=%A~BUb*Pa6>Y!<@neQaVpi{MEQ!`T)efwh;3laB<{9oT5Qj>L7<0w6wG5k@srZa{^Wb?3B{H8g z!9tSK$xn>eHC*6WKh>){Vl_dm!zoUATtPD3+;o$VCx5S~usyhvmTh8gWH9 z$^l;<+(TFVL(q@UFK8NqNR5Py8Asbtnn4{1!Hy5rLJvoxw&9_b%_Sog%!a7Hvi$mr zTouvqCHg&-5`BVVEv{&8k&>lfD&J{!96l!Gzh8J{3)e}{z}6YtGl}XX?&i#L{W8*= zC_GAgE<~M7vdUz4@nYk|oKh=wd-=%l{+hu()*pHe;ze(ssZ-`n8MP}WU*Gs>T2Bh* z%|^*r{K!eAhUk{_7s&(Idt`{Tub)hnDQ({hwz-wc4vWFJNSd>u`(zT9H z<)C;<2#;bM_C&W+5SEZRZG)dKDsEM99&XU?LT|?4h1-v6xSEawbOh{I`lWk<8`ys0 zY{LyBA=-1OrWlqq&AZwM2?fq5ZoUet4N|@U+lNuMIQQ`^(WB%R!^;pc)|TG)tOjl# zi`tw>Bvveuk40ja8VmVd8U2)B&@>uk@FeT@yzY=fC8Nw#fIrScJmaMBkVS#fO4JzY zH8nsqkL93;42k=&ez$03!Cp=Z;j0995UbG8+zId&6tX&I_I%pOlsSrC8>p(Y6JJb>0pj4ugjgJE^6L%M{5tR z$Y?L? zIeaT=Ye<#Vr+(;@9jNfIgx@u%OZ8l*;bAx0CP?ye&;*idBo&Ju>%mtpg;AMxcLx#LAUjwNu~1&s>1RfG2nX=|`bfGRCp(WPy{r9tst2i?ZV~X0< zl4km>MpwR~_EqhzDszDK`(>Vz46aa~Z2wKvm*+*H)MVmaoZv-L1XhIks5x+Fhf`}6 zY_Fp4gpMVJ@qxfUC4c+Pza`?HkG~Q^^Ggvh&A6)#eV5UJ7=ipzQK^1WwEAKhDJ|`E z?I@Ms72kDj(y6Yl1gW`|;;O4W+kkL|;r+!IzMrO+g3G;CQysiB5Zvr;(`sTh(yI5N zS5~lzu)$pRxBOi8O! 
zt=@aUd@!9@y&#Q{Y9x}AGH$hix-Jn*T@53y*1fCRim*7Xb)l%+lx{wk#}3N>{5vhm{Ld)b|T;ypE2g zv%+0Hy(C`~{kx>nohEYiv^D%}?5++L=Zoe9lJzNVr@%%Y2 z%R4ZQ;On!agApe&SbEFI#Inlqc$KfYG>RvF1nPn0c#7H{dU1{?5u}EwG>q>no8{{@ zM14?BTDMpb2NT+QGD^RYRnpD+PF=$;4Q9llRIbp&9S&tOXu(>ax>|f3s7HKs?chWZ zfzYI*OdnMoTQ@B-Zj4Ys&DHdvbWk79pQvSq$n}XLWIZIvr5if>v0Aqgi(82+T%l^Z zC@_{Ho||v*Zu1x+J+)Y$pFlbz2_@yX+j&$dneVl7JG>+F%G6|cAwut=uLw{58w;vj zcAGmAo?1M6zUk25wT+jJFVRNUX6G@Rryv}p8ohbQJk;9>Ad7b5-15V2u{1ixpzFo1 zIZAO_j#VifO1$mN>9hxV1veeHB()iGy9oZ^O`3b>Z_!Hd1)B|c`GBz6ujUNWN!S*o z$gm^sf14j7>jvZDz(l77e!#!tPV`Mue$T-1e(C2%28t`OkB$m$`F5^Kt#$CMr#ah0 zLmPIbL-gQx*hy=CE%8Go73=k}kC=24Jn_y?Bapqs9?=(*8k!^tS#uw$-D-||K3L2$ zxci=}CN_UHLwO}%yw7<2RuqZC{-vYX{HEF;XR%1TjknTiGTZ3K(+5ZK^bv7WU{6bb zpYHay9T}Bj9C^1eM801vUT|prB@YoffE^x=dT$H9dNdz|{gIj#QipzZi#7`-pCnm` znvuJE@jX<iZUJTXR`bnw!g4^g3%mUQJP^JrsP}_(4 z@_lttyl^tOQL{JD*XgbO&W*2?9VQC8El5j(Zt)u$S208v3B>I$vF8Kd(dFPigG8m; z2{dd=GZ6RU9}C$FuW{>(pNV!5bb<#HBFdH!k-mllRJTrWAyX5X^IJffoem_SeNXz{ zPJHH@9()6)xSuy^@wtS132O2Kzn-VT`ivD^q5RM!q{8%-(!7OI(NHas8vJqVh5i59_H2dZ&qz3bIN6ufw zC(zA$Ooo>x4ZpLv)}9^{6A6?d5v8404!cC=qpi4R7FcW8jMC~4sX0oZMHom`lMjJ; z2+(a>C9apCBYae03TuIb&O^k#AyCxMX?%biTU*foY8$N ztl<$0bE^J?!;e{{M3XA#JT^l@pC3AJHL&pYyM#SzC1bJcvAv+}q0=c0$X+4odzu0{ z#;H9aomwBS!!`wUfv93u3f#6X(UYy9N@(t)qIrr96c$!TPULk1nhJTL%Qk$_U3T0l z1HGvJOdKVM9i9>g6Nr5jUyMxt@%l!vL}PL|o_ORfwM4{4dr&=VvF|%Ghovu=seqtp zpI$gN;ay~@t!hVowq9jQvZY2T@51_PB3e~}ok~$F7q@7b2o)ird1pr3yV)c?Ur z>HZ2?&M%&cc$8h`);gtMJqiGcmg}#3a$Qh(<=|&ZiTcEOG>YztBjfw@Trm=!{$dyU zIrqBBJa!qw_fm!^#V~B1iL2-ij}nd+5(+UZl_ObU9+G3S56lFuyMfxy(V=Pyorn*i z%nVh`QI;`vmKb5>0?Wdsbc3EwC1p}tR#CC|_!ZbOLqazO=$onh=OLDSt5dXmbYb*? 
z)9_Cbt&fClj>!emghah?@cwXp5f2?`b7kw1H?x`o>CiNiM&x%~rQAVurt{8#VCHp6uAfqEJ2jXZ zE`HB-C!oUCoolCCtaQcqDzGHwNNvGrEp0J2urEVk4WX$}sitp2aP#|(_PkH6#S4e| z$kvBY-2>pK=sKK}u4V1|5Q5$*0(&}CXX?2LMZtX>PgG58{3@KR*xaAi%;7!8YD46( zNzE(YLN{>DL@k~9aX+=)qkN%~ARDNqsqocH$O*>U|5~Xw_~XP3+Q;GB%OIj+gI2#jC(w|bQ2UVT z=Lp}S<$QWFE)0U8jlxp<#TOkggxBC_@F?A-$l6@KjcF}?eqVQl@s<<_N*J#!YJAbA zo;dJ1?m9I(y|bb2%>>oA)3dDz6$t0?!YHD!v@jdZ6_t%7gQK zfQxVASkFpdBl&~21ZL8v9nXx|X~o!ECHWotr=Vd6LoEHCy=Ux?Y*&!p!Xg;&Ltsfw&UVo?jtn$ zn73Ua$Il0zIaL989z4H3WkG)M{M=o|&SKxyfG3Yct4s7czMGkShcTk#9Q1Z{JY*VS z;}tpHBU!gsyLVO2tR8zn&9=xVme$v$rexT%Xf$QCQj+1!n}YvKI2SMSIphY4usoM59rQm{XlD^w<5s5w`Wc&=9~f~gs%8$ z$Lx2V4qec9NYJVpZ4Yf`gL!_4BY{OkilsAsH5$qEoSTb_DKMKpgl_HxmkXg}X(yE> zwyafa4Ct{^Fw*}hyu!7UdJ6fSqE7C^8L@znD?ZI6V{~zDEn99;&1VsreMl?kw?=Gu z#(|CJSB>n)7*+30%PLb$TFTcVM^kJrXa|G$Ij4EFl*KgXyjkDZ%;h6-GoESPhKzET zz}#H@m;0SGj&Os^H*Ij}-;EM1{NRfy@YY#+0%r3r{9KHj*aU;Gg^fK4R%w|kr`1Ni zL(^tuE{;I%I`VZJxpmH5%Eb*TGJ`msZ{&CQSLln==@94Dsa_C!R*2bX(aWL^n;RdN z&W1XKA=(e@Lzp663j`cX9ibkGTpK|hNUlhz-_6Ns-M>A5;0X>f&EMYy-NiL8P@AP9 z-hQ=FtiEvj(58cD4n=J7bL~f+-!*Y@Vn>u_zae?#M))>lTjLM+K%TUy)~68cx&)o< zhX#}>Y<0I}PR}nn0=&I&TXun;z7X~=MSRgrV0)=*D+g}L^SiIK;ytJ?Jq0XfzZ{Dt zBvsYE(2-*f7-sNLOp{fuZcdM*s^R>^ejRcvnx}7Gs2S?Oi7_<_V>i*1@7BnvkHj`+ zIWI#+?)O^dyL;E*Y$%r|f`wj%>FFm4{-TG0fuqP-#gP`8{YzE?p!$;&FfZdZ7coPd zr3$yc@Q1adDV`ef!gY&|ZiB{S%0%yaBNM*OhC5ELGnSRBh>#i&dllivkIY@p)@bQDTX`HZFB6P7IQX$B=*uIAhpkcaFDx~qH$nO(P>LMQQqroOCtl5PK< zj9n{*cU+0Ke{E+y?`ET0+bkzE;L^ZB{)PPY;OUq1+F}Wwk&1ENKbJX}L5a67t zF-MW?>8RC3dE=P%@f>bHSiJbUEC9pOi|{q;2u5=YiP@*yu@0Xkl7`qAE8nua*g>=1 z`DC&GvUf1_Ol`9^}g9g0eZC?|N|M>ol2RMLk)b9#!(F?EO6a~4j#pT^B_Uxa3Y zUDsP{k-@iqfG%H=ox^GZl04GMp$WeViUHZ)b7I-D-7>E>b?HF2G(@5Qt_o1ch0!)0&nMAXCLP9-w zl`jN!l-m18kyA2tTHL_NDg6Lo@2m1sp^t|FBd9$eW6KfIrthP52#sdQMd~h){{7c_ zwfOcEuO6*!MIS(*X5nK@5fQ^c9)%;`DWntBXfASu>}6_Zt>J}FOH4t{U_7@j#~o}3 zT{>pA9^3!&jZ~X!oCM!@w8C`;U%>LC`AOpPuh#xUjUsrxHe{&CPP@(esKaPGU3-NV zhYpDfEPwBrH`Oyeq%4#%&ZEC!QuIq z&B{g~|MU{iy)(Y8N0{0Z<|rA76T0k9*VyN4p8g1PMQ*>JTETdv< 
z6(~rrZzzmH;Zo5#2zowa#wtfadw~hUhLLHWxED~|s5ppzLuaUT=;Y*z4R+wap zS8wPgSIU9jNHd!d^9{(WA(rFPL;tAQ7H`q6v8REW4 ztQc_7bkZ5ozq@69ByPpRXQURd?;RJxwcDMz%IW_J=RwDklnh~V+gM*-*?~;9H{C|u z{pzlS@Obj-la5G4sPF3?xeuDn3+Jeo>wMBxq(}q{lk;N+O`3{{Z_}b%R{eqVtN*;c z2m&`b=CBm=i3H@qDNfZuwFr5{*C}E(R+9Eb%65c!uLMiXj{U%BM4!DT5<|R5_tok) zd{gD8PO6|B-`G7=)a0Y~oVn-A{ZATS%LhFwmHMAPO8b2KLh zH^QD})J@blY+=a{GqSR;wFegO*`?)Yr+nfQo zs%mxYbVnJp6I!lD2lr&UulusyA?VV$gK3%AHgdAHorbQfi*7EzII6Q8J+X6<{4QV{ zbmxUgyBsfGS5rH;=q`3$aC0EpdRT zxeDCe6`Oh$O7B$`J9)&CS~GWcu?Y^gibOe_w92fwQS5W`WFe~k^{1?F)j-KW{EPXg z#KRs8czzW2bBJKIudxjgtAp+??qQ*)P9MMfSf_BNYQrUT>5iBSaX1y z?VQja#;YV$ysdQMa@&-1g}U4_*V+{4^NG5vK{C4O4>(Mkd#z_0-wX=Uq5SfDmWJ|` zm+_rMu9xbSL+J-LE@YoO7%; zkbeE3l#0R5OyFc)pcL{8^ao#M!*aMZPV-4meuLu3Rx>Q-TB^&^?dPwMr10xaoY_a+ zeCf9>YS8J5p7x{ErB4j5ogteE1#Iy_DZrCfF{lHRAKeF_U>|UNjAt}1PlR(idhcq*L7u~qhts>+*9Yg zr@>DfO#rfl=0JT`5!sHo96zU(%gsQ;H6(H4pH!=Dauqdw&q~Xc%t8F2u-l{`X-AB; z!L0ysVAO(BF4GGm7&#A0$RXstc#j`WB{@@@CA}u={B<(fO1_eZ1KnpUa6f>2@;`}_ z0H-{56znKz<{PuSu@9t3^ci7$vD2-Zw%SZD0${Gx#PIIMtxK zYQw@h)DJC3N&N@!d)3dci<6>Cb%e$~xS2bh98QHWn-nTqRnb`W)+Ll{f? 
zw!Wq>&OS7UJIMGn9W4HO2G7) zJo+lood1g~84RCzhl8R?N7IfjH!z~2k*2;Ry=;x-2uV@Qh%zdWc`)Ttjev6|bfSk* zeS%{b+@xlLE-SZ)nTA`rRcX~s*h=FHFoH&) zkECOP#>v8X949+hY~5|aElBpa2UdXW15Q-RqMsInoNQT`U-pGil62!oypF(tYAK(I zOYT|nd0w&ePr?o&NU@!dZOrKunS(FJBxS9qpd{qv=hFfb^Vk*1<^(R`1Dll7sVQal zkmn}iwz>z58Rp-xV%hV9K<7u53p)mzJD&LK^e}YJ(nGC!N$9uOANM*Rnl$txvyQa6 zI5AQL)M+$!hdk2CMWizyW5ROA*(llvT!e*HAh?^%x6V5?yHy4T)`O(O?av(GWEdEQ z3ob{wTkRpVp|n1pmr4~k2j+A@y~SumKVWQ^6yJN_0CdcL5TI4pin6n<2omHdT_1|Y zcW%gcV}PDU5h*H*E#Z9h=vr@}%Wn`_bbY@<)XZM4{b2|7L3~(B<#3?Ucmq-iqXxKw z=*Y+lX}xVI&O`eIy3oDAxuAzZ%wrL0n&s0>V3F^xZKe%Yc}Ax|HOx=AI3Z` zkP$)h9n{5ec4?SMKW#a<%{am*?==EcBXlaJ>Ik>7zcEPYu1ZU4MFg=GD_PtJKh};u z{U&`w)c<04vk-f)OJ{4VI-dTA8eqyex3+X_Q2JoBKL*@~1$GHL5YC5k+TL%GBuwciJ-{SBkL7&J%V| zSj(Efr32v@>-htIscMC?#r&e?of+bv#NN~TP(sPi9x=x23h4Q#B0k1)IBws1w;!27 zwJpb#0^FYTWH&S2qITOz*z?m6Pb$1&FodlGBYwoA6|t^9Ct*+8zd~A3&;#fuWQ5kp z9>VsvKU-mT!%HaT~yYoH}8Q0odFRB7A%~E?RMNV;VpTo2w92e_M#lI=-QQxwh)nm zN?GPRK9rm5xpn#smn~}eE?UUPd{a+ZXdSB&p;V%I2N+c_n{uNtQ;7D)wKW_mYK={C zNqoU_VM#!Pe+NL}h6g!hf4<0z>}qkzv9jOs?6x znW-Umh7j_rq`yu>u{J3Cp#+10zX-|GaD`@8<+#e?WwG4Co0Nk#hw^bL$*C;hcr2hz z$EX*=ef7D|^ikek?a}h1E#StVf1xnt+~OoD0za*U^E7N1vg^{BY7oNMwPi0TSvjPr zfjOc$W7Pntx=Hj!FHQVLCs4o*!c5%#K^rUg)5K#EJZU(QYaHD-d=jK@2qxVv9?HX0 znwf&5bGG^G&~{0f2!?cG}g)zK{TNMe_kd~RYvS;MOn7u}%^{Kjzi zg1oCGDLeWgZ2Nt>kpQ#Yi6I#VDh{$LVb5eJ!iab{O=$MCmCnEb(`3p;_s@z~4FwFm zVFY}$jokar4-$j7jVX1g4_{4zmetSVS?4fqecU*B(+I9WJ8SuiY!m0N9BQ###OyKr z3j}Csy8JQYdOL<6G0~ORtI|Hf=9@Ni|&YGrC1svZyN;z6d(E`fg=p-&t7Dgz`&>56A>+LNOz;NY{Ol(TWd4K zwl;Kel29bW-Khm3YU+3xePj#Dh%@5T);zv&0++9d39pRtvDbV2Tr*tbiI9>C6o(3A zFwwU%Ah$Uh63c-|-i7cnE7p&UHs-Bw(-D_->RM^HP#7VTX|2srNac5g6hpaj#Rtv@ z{$0}zpsO2Q^)w_o65} zWx&mUr2EZNiLm=hlTr3nj;Fn5R!A{So~|XA67m)WI}f-c5sMc%s3?OT*~%p7GPsir zdJC&SWZ6#R4dBaJ=_mW6{AM+wKc;bM{d(b>@)-*>l>9{)B}EkdO@MU<7l0BlF|Y83 zq9M1spcG982GtTvwMsAgZnzSBLF1RJ>n`sNYbE6k_KB?;CIFT8PZ`_rjsq{SL=BTp z>8GY~Sw{YoCucfO?QIfgpwxdkKr1dWq+fxW+COL;)#2Rpo~XIgX1bP;jc%Kr~)A>eaG!80WbPMBt04i6Z-DAlrRzuYruzXOvGR 
z)D;V?AR@GPpC~3JkOMdVR_^9%I?apL*fv-_%lw@>{sBDBw#9+_2(@0bWwZX>fd_Fl z2)A$Y6wu-hEumwZzX2|X_rVgl_n(wJ(g`*~*HXcl&v5cTKfUjy;wC;YppXVH4 zGibRJw;crc{2$EhP4X@5s@Z?G1#IBcT_hv)>J5^WQv4w_gKOLpo~1C?rhTnQ6qN+5 z>KE@u93615@3Gckx000238lr2a?x-_OMkHHk**qHj0w`u9iK-}&)QnW@1so&z%fM~ z6GXSBCG{#KFDq*U=7OlAZHM|WHfdKR6xBO&1%UO@JN*mTwFL&C26|c9SK=NIR{-^wv|4UUlNWb}- z&up5Dnklf6;-5C^gz<1CoXAWW8}1Q^pF0LTyWsp-@DWKZdB5VcjJk0prF_0uC%Z`#d8KP38{YLD41 zT`JBD3b$$02h$bg614rUZNnVDHE8RTog-CnUtAn+dkgXPQ?a*i-e0)3T1Z>`8iK;r zH$F`90zhRrdypq?!81W*22G;b!Y}KAW+T^C{lXyuICm8kRe@37u;Akve{z6BG~iF9 zbKBX0p*bKjla(Wwl4m*9Z%4U!SDW4X7tLQBc9Rkcoel~WtyHGfY_LSf8~*h7bmCmz zG}3j=#-HVFfkeuhSDQLK(;`nJx+jTPT_Qt-ElecAw5SW$A#%$fl{-J0LL}Br(WLi7 z7PMQ}Mg%lrjtapNQ6L>#D}3O%q`nV2qGgO+nv+QJ5b`}2(GBk2;9T(~r|#IBB7BH4 zSicszUOCw;+F>)~k6-SfH-Z0&Bm@qtkdsfbY_(oGZl0h}0Lquz6R=_=Ms|UpMj1y+ zjKn8rrT>B`qXfu5o&W>_s%1CJK-zS-74WK}2U)1y8rxF7TPRzOl;ZNNVmMT>Ya`pI zAx`V*J{KUxe+6)@EJ^K5xV3W{;o2*|#4YhS91q`A+L81scl(t6xc8jP znMo>P#(oLM89+O*a&=r2p-g)G#f;ipdRQXN3Be^hgY1;i0(z>F)qY_r{i)p=S%$#B zvaO{Hg?dYatm>t7I3vIvcdR3Junc^VV-d{v3kRzs`*Pe9sTCnZBg;)r)J3+fE1A+) zNXs@K7hhpAX736yPu~2+s1&PO>!xQQMzS$YLp^B0@R^Et#-}S1Jp@D}|2NBDw_x<% z+=7{90N0F{p4FV#l@+*}S+E+PBD3ZNGto{0;OXpPzN0imCT#rO9$F!^a;DEQ}OLWAm+eTALP9DSMIj&Tk*zRGuU^YD61SJB!_(p9Gvn?>y^b zdzS1-L{RC#hcv`F&0n0yoEDvmrVe#@v5_sl^yG_9cdDx`Oa<8p67}Ih1cHZi>R+au zxf1T^GZ(KSXE`G`mFx-u(a8Dp)yP1dgHstu%CS%&xK)xfOcv_ER7X0e&m z+-@mnidZ_Y{s)c{Zw8F}ACt z#+<6=raa8I8%xI`T zoy1LrVw6u@7|G>k!zJy-4``AGV0lO@qO%lRdDS?ay8Oo&sSgxRaxXaKi{R6(&KC|A zR5P4^$UkSPl`_10<$_qmzVUlAKWC?|@%Qw3k(~A6D>SewY6Y{f9wC7a5|1;g>W}JS zCVaod6S5wW3c}&l`DYz5Ra|KGA1~~N+^;KawgEtzc42yt$ z;i@tzqB!LRAdR%3f9@r_R3oHO;urtOrNP$~DZ3iG=;0fhMva0&kn4OWkn!#jfw!M* zB4qL!|CVV(pqH?sgt%H{d^t%C{V#+)k>}xyR!>cvYaurWI!JZ#d{_I)T+?96b5^r> zo_yCazvRyQ(tC~M7QCqFg}|z_;b_c#a5KjO<6g8^Cevj(oZZYwYq8r-GsZ=}nh9vS@7C3b?5R=Ex=xR#iEMd2? 
z-+|0eAfmQT_Wz^)sCj@^AUbsuH!YZW=f+HJwh$dWmqJ1PkE$(vGpzS22I{ms_tdzbCl#-QpR9nFMv?O_!-tjDf=wCFhIkd~=aL|gLqE3@df_*MSf*2Q z{m>AmSDwu2FGTM+3!7)QN&O^V#Mu|ntemrh{Ar0ALvYA&RQvOdmh?jXM+5ka2%zQ% z1w!c&Gtw(r9a+&i*1mZ-JxBb8J#}lUQ)z)dCG^_=Ov#%smIgHDbV19lbyK-*Xl6c; zRkT-#vcvZQAodV1Q5N;szX&DLXR6QO#?9hohgP{87^B1V$~}QHZ8BWOLEM!%1#s=D zL@3(TXQK_3VdNhBaM1wg!&sW)>HBm3yrp0O1qeBgXG*!qP_pK45X;F!87d%RDO}!W zY|-~>i`8+XoGh8)7`8CJo-Y55lQ`pbQr>>?AbNlhF2c62Vw_YmxCIpw z7csKxrjB!)!6~I*JE`+fIKh}RWD@hi=P2Xzo-yW^98@@`v5P6-AncZS`S~vIWmnET z;d5-)v;%I5y!v#kvBB1l(56V#KXqR_etqNbJn`B5MWGUmxD>xF#}Yu}(6F6P6g zwwEJ+X++A%Azb8^H}{K*VPP-=#(8@rZMT+@l%EQ;-=jYokM%~D~} z4!50bswCHjX^zXX>7$J|h`q@Cy=|iFAH5388_y{?C2R`ug2!&a>MMQgx?)wi}`>&gB>oJ$3_ZoFF=fDwH)|l9X00YU6)$QDK zH_6fxp${`Ng?t4pB{cKiQ}CK{b|2klKZIw=zDdE{KKVL_UQ1^o} zIr57oT)V|ALKESpr4*jpNHY#Fyi`_*4N0L)2*m)b6K$9P;vmSavT+Y9;y?HslA^r9 z&Bqo?5Xda8k_6}7dHtjh$50N^bB-!B`)9ts0VY`iu`0R`VK20BecUq+u~DN6$c#-7 z|1N@lQqf5#Z(ZQv^rmD}CV4GGeuI*Y>jZ{eO!4jltj8i!<>SOiaulDZu61{#Gx{~f z*MG10gPa?3KPNLgcI3>jQLe$?aD0qa3UJ%Q5W=ic*$1q<`^+$LVRw7H5#H78T34&E zgk&NPIv#%4*^zQ^Cz^a_@V_Cn2N&?aNu*c2c_E6B7>rB42u}Mx8e|8WtbD_*&^73g zZCCLyPzr1mi&Lp{gbqJPFSwJfuilDn6driWqgg2&j@?yNV5mNQ4UwGAc0~wuB_$WC z$=bo@sbhi6SZBKKsiC6#kgiRp;{M_LV1Ft5emDa%<1|fcBb`GU6$H&!!7LA6sN~}O zz3qbPeq5P1pdD`iea!#eU!=W7Lx*P1wAIR;WBCb+dj0#S->p_g|DWE&+|7r@Gs;$Aw2{XA z$Ns|c7ER7-5y?+19Cl*$zJ~hiGdDKu@{VZXG%QyuKD#P&Dy+{wjuLh%svoD%Ovw7C zPo9P!d0_Y}?Jh>5O0`Wk$5tefV3C&F+-Og7D%%DAn%5uLx+%$|jd|TAC4MRh5Y2y4@z z0bh5pY;j=rKnBaKyL}EF;LwN!nZQ&zFJ;QDiPloiCl?iUuPE|e zg(HYJR8t3NddxBgP~27}9QK|Bvf3w&zCMT)bPB)f4?Zg*x~%V-eJmH%Rzo4!IcP_) zELKVsiEh(RzRRV6TR=@-`CSQb@Dov zvA?J{cv(MsyO|6T7n-osR&0@_L6=j_h0GsGno$be#Ilb+mi9O-II?}n7(768B^4k0 zuX2u~(*iKvcEikyet~Qfpnb-osF4vDSl5+?)g806irBvzAB|*Uv$SW1IAA|x-xeps zv;AKn^3+rMX4cTCSg^mE@4%Ah(J+U_{?|Gwh%lD3tNyG-^i1}4GH7gE>kz*<3bU?o z{U$cCXx38%RPkV$3yOv!vC>_!kw0=ns`*IfT5D2mR`Un(mBVd9^=^#xu2R=&@(l2! 
zEb|igga7!Hs;)XlUfxx2A0mdV>S4^`ChW1GHTV-jrtxb#8T<6Ft zb%-RPGcTyJ!*$s0^l{v)R#t}MGV7T6!cRx2JlMUn&igp<-R-Jk(zr}FBv}RD;u)IX z-e+#*U=A4Z1JxR7k60*YirhDiDPlslKj_Yp6>S90R02AcFbD0u$Lxyy4wWac znln>`Mj3_~pQL9>DU~(H(a87-N`!`KF$?Xi!SI(XdJfdgE?e}HG{E|J>D8qN#M$j~{P$Z!% z)Mc*NLbzbe7svEtHRQ7HytRDzUDzhoqNUK+r5__X*l%tDyTgT?l2$>!oI)p419K{P z`j+znd#pYVp=MvlVYxq_in>~2Vre$advTPQ7xgD=-AcufBXOOtJZ7sRCOnugNpV(P zt{AOI_hYo77Qp&HeJLXtnYS*r7s`bDR%N|ooc!6%(vqKAsq7oQ5v=BA=hBqmj!gp9 z)=)u;gyX6cwrazn=Pf<0ANg@&Ax-LOIi`2di(lIN;^$7E4Zq)- zC#Yg8`uG2{yXeS2jd97#%NU~;&vsAwVs9Ue%_ztF(JfEm<>*uObV$AE6#vyTGokZ8 zx6|a}>hr6X**+g0E7P4@I~LtVBFDPI3ks<5GmphF=J_=MCXpfaYMc*WN{JQaNf?Cz+jO& zTf|sW35AC_rP)K*EiV%PWrD8Q-@7>27{6YZLF^Lh{#HC)XFI;0R$6Zlg zn0Ks{4jjm?6aeD~_Xx%eP2lQ*x*`)TSsdLSXfKMwkH2@)*q;f{Ph7Xme5xb${VqNk z&a8;TrFDsd-KH?hmub%9mLD!mL+;dr?ea>kN#m49V8uf8XX>MZtvJHizUKMDa)e8V zl1^b(1F+fx!~fu4cEkwI%FBnvzm)VW?u=KX!dn#xo(|5M)eSaiR^KJLGMyg<89#mZ zhbzrY0YzbI$BWxt{a{JZ2*k#hdz``GpSdt;+Nmv>UrZ65berdSuC|`2KU|D#OQ(m| zz_~4{r67A6Y^C{*^$fkKY?&sh<7mnDqf-ATQ7P}s!_75dYQf-&%#S<>|H9+~Y|>f> z67H6Rf>I7(+tJX-QMUQf?R5zjF&J%Rv|amZWDya@u-QMoY;^|?>uE;%iUJY)UsNBAuL!S zb0U5mcY%&{p2+$O#eLyei|`X$P(X30n{uol3}ftzi8p{O{=}%6tnNqozD#H3K%Njfx+fl zBJr3KUlaR->>g{Su-f1n`2Y4t2yy}GEC0&HR^RAo9ztAhc7BHs8by?Z8Bn+m)2xdm z%uxgc?Vn3soJUQ4TWWoLYnfpuyUDh(f**HZRVvH`;dHOHNVt zHS>xr_L5wycW3?2>4~*$8j@<`>aK7HZ9Ncb6sKdO)@{?q9*7QBGgiKcw9gPmHH}xH zJ@7wbchiT?s9Z2jm@qvdtdrUj7m{2X38V4I z+r0lXUnA`~J(;OsWtH|5tikXYG6Q#!4N)R9#4$9sU*K5f8Xe^UkdRx(}PbnO;Tbz@iJGnjykMj{;;*?2ddP`D8A} zJWn_usbS!Ix0h$*p7HLwJBooF6w=lpg@ep@q_l6*hx9IQ)>b*_Gw4{35XlRLPwO=7 z>^}d0vC9LOA?&XnT8O!${#+}Z;eqRvl<1B|NqC(LeHf`G^;%5d5@>L$RklbwfBWv3Og zXoB^Gc6@!wE{eH29sGW~uy7ZWL~4I;b=;fVhrc#jzN)l6bIAAl92t7Q>T*7l7X~T9 zztKI3mhCc{b$6`F+Bxn*SE!<$en%vbDL6G1EbkGPrNH)-lG1sgUKN>VuE&8#Oy730 zJ-U>N2$pU>03HHMi5znUDYMF5zj?^V515#Ih7TR2p=-%2=U0FiXt_zj3g}aZ_bur# zC`u$WUI#_f%3_k((=5ktF5|{Jms2D>Th^*hB|SFqvZ3kd3$myF z`sH{iw@eKF&s%Svm>$9hTjQdv@hgyx>Pl7cq~E1op>X!8!Ix$_VzH}vEJv`gB+of= 
zR<*T^s+rhK8;r)4Dw!=Q$=RSZ4){MWi2EO&hiwTm^9r(N2X&-Q;#5DIdAOwXb3D`A zO=YA4EC?$$SKee2AxZA+?)?=u5WE;0@6qpKU2>HKm7pAy*jfG{k9mZR73-INur=g- zP~ym1;#}jXx;`~IAv@&LK53Mb9_|K69+wU0+Wl4o33FuTMv~jymbb;{NV#BUeYX&q zkE7EWpNo#R3jP+{KCcG>(_-Trx7~UmQ4omjPkq##A`>OPUgVM}^a{gWR}iV{=F0jy zCNa^j0UA0WiQu6d{>F&zb(=L?Q(+alaLsyRiqspN8{lu3&Pb`AA}!7yu#P^Xfkb<6 zWIt%MJx(m?*}ejws!;{M$R9sVKT(P*yc3yGW+Th2jv(bB_8SBu1TK3kYF~*0S$1s6 zw1Buz2S?=X3o$nJl*9vALM0bReat{P)T&2I(3rXa>rC?9O^8TQes{{*Vj`bupYE2g z+&c-3rO6JW`jqCr%KjcZY%ZzGZX`UlB+f&QXMShQh6^A0@%%x08>YJJ?ZdVU&1FW@ zqUO@1RE1|VpabPe2}lr)>IV;-TA|~MZtuu4fNYc0u-_!7yfTigj*O%~rNuf~(uTju zjtqG=h?5yg>L0UpXRFcKcgNW>8_o)XCX`r^AP+aFlE2@pn<}G?fUa{*u>$j;nvwot zQKd5r?aO_0!Ak6we1b_gIV41!1Hg^DAkBJfjb|m}WiG6OH|=TGF7l5q^ZUPnGnS=^ zNN>YB^MEre(Bvu+={^Ci7_MRnM3PlNos@HYQv&ffG~({6%$TcO8)T|)Tj)7#$bJl4_p_s&tPprEjP|+l3oUlWG zHM%94VuvDUEs;03(HAzYTX|J~Gw01qts2M$pO^i3K@BlS{hf+oCfydC`M6P7x}qU( zBXQSq%2tf3Hr{+fD}lMDz*WFyGPk}5l^yDQG+x~tR=p;+Qnc+8ndG{3e|y^0Utb5+ zI(8F-Rv&+9Vt&ztI5fFIejN&@d3q%CJfJr$9N6yxJHM=4(sqPL+TDoDd*^qZr~HM_ z9daZxY+9~=jy5toXtC}AUXC++s15k|LP4t*C33JkxG?2}S_ftjjINbEJIJNw&Zb@3 z5ZQ6fkaU7aSr?yJ6jrnCk>ctwZVL!aHLcKb2q7{OUcn&ILn%_iOX`YV1RFkDsd|7L zVxB(L{&n7g!U;a5thb+l@WzI1@b`MV9;5gI($WOnqNkCLn?RZcmc|C5-XA-wKo4i1a{L9J9#<4arNhEzrOG}B@P=T?oY}tYYFEDJg>y1 zW!beZo@JB^Cp&C|{zyvV>aAdQF{|Utr{Neoy!h(KI?Tk%&$bh}%I~bx68#aA<0**u z<|}dEiMkR-#GJS=w(9Yu*gND7=-Xtb^zR*?6atwG)m!+929|o8b(T^7SGh$f@JLOq z90oPy6}lNPwH4AHdoNHe=b}CE_M9cMV7JwmJ;i+w}k z8z*-5OI|%dyTO~nZjYVLRf7SZ8I8m8{b@AoAh=^6rm4um-gN6l`V*^etB(eH;^woM z#E@u7y`YF71BKBHmqG^_iZZyxSK`AD$}nkHwe#JMepfBS&zeG}2Mo3`#YHxy2YRj7 zcxwbPkoR%$Vn6PWv?BespK3(S+1;a7*6D|Tf_jxO_Q#CQbg@0+a+6FQKgQbgZTUsh zn!0-pfaGLQ>&ndq8UA!gWh%x%BFjHZ?2+WuCg!L|^mydQ?CDjEl%6*1@ov4s8$GdW zMZ1L(8_@9DaoA$Gj;^HrY;JJ=iCLlLp^uqss94{EN}rY6wm`{edn-G)=@tn{~Inra!UPnbsjA@Mg?E>cfh$_!Nm zKZ8(atCzjrKENM$ILIIbs5aD3X^(Cqm9Z1^7`Wc*KM9h<-40INg<1tEG0HQTCfR)o zEPXJ}GCc9ENY;IA>Px&OMxKbX25c3q>mds&cDGoTFj&P6Pt#p|wZAlCJC!=S?u|M) 
zG&jNd9yny|Tlf(}Rm$Ab^no(p?dW>a1{jEvCY8Ro{6@$qom1(c&}r`*lC(!cnhox=NNKCa>b;ZaVFTEe;cFg!s}XX2qzF#O9B$m z-Jqe(9f$ypS&Q^!3uw**0o=smgNGBC=Ql5_ZuUe>lo&Rl7}oru2{bV4VHrWMHAVze zKf<+p{*L}z;3?jzv&x(|VlS+=6Z91bh(MjUwb~diTgNoSCx(Kr@elBMGmx-|5HsCJ zVX?q8q3jmdIWr^I_8<|H6A-HphhnCZn_bj4J2I08RDm^zeTS}Tf7zDl{U5RUA*~+4zSxh}0za48o1ZFINeMh~QWF6+50xavpaiA;=4cvWjt4ZDJ^Mg|g)1MgBTF-!c)pVD zi-h1!4zoG|dN>$T(}LC-*oCxDK@kft)_nR0^%H=q2(IO@jX`9 zzY?TIidq@>Gy4V6$pL=KMwwm91SU^!k5uw7N`ko$(L5~9 z3U$X;bGmca>rCVW4uG0KNW7sMW z7GQ?(Aqj4)asNn5zSbp_`yll3Mh%MtQx`!esOvMdNnSq?NEIX^!%_|vf$H=DJYaBe zp|tLY4Y|@$+n1Ub9+ILBj@-m&AxZ&^rv#r{cFe;lpB`05&%aN&S6`9&9xBJ(T9#Re z2ook@*Lj++SOs3?jQylA$<3IOeRqf51^P{`X!rtz#zEsc{X_mriBFfB=bmTyro@4i z(B!zI0%7L~@o1OuUDhT6r&N#2L9xFPY-LUIq`RwGQ0QfuI}JAR@$Ue{pCNfv57=*pMXz%U!+_XE-_Nl|94M5|uZ%>p*;5D@_w} zaE@A|pnKK1H_1ra!+|}VIHV~M{`7p9p%9P8CtxmQ-oTCLpfFSiQT^IU0jACfc|$uS zDH@jIC8CHEubvDUy5tmj#-;lm7~j0}czHPA5sfK`5mvIvRasS;zUsQQ*AiK{HkrSi z$ri(87~qP#kKM*4m8>{Y0f`O^@qVQIXXyshZ!bsY+=7nfUauvnma@5aIiD>|!-P%> zI76}q3H=;@k!H^(ifwv1(v@ay>A^fzB27fB#LJQ-PMn}GJ2b<5hYxC{73%mDx0uLD zUCdPhCZjRnP#e}obnKLO=9G=TPfolNX`~4Lit(>Hp~h;tA}$kERkU7dy@Y;71_ax- zMhW5n@Js6mbKtEQ>YOGWC$4Dx*baRFmRWqh+PhXCL}3C5N-l4mR?lh9o~?m*dT;#J zpP5Hjh4BwxikcNDp(k~ekqOp!Lk)oatIc!BAc#&e+VJUDLORSfJry8P+5W199?N~w zVA4nc6}f8!#a?GFOelc`=9z2S=MD?;A)gBBb3F_{IXVeZc0Cr?N9*cL7S5tD2^KZ_ zA+?HE@;~%_Nw*pQ5ld{+Yp2?$9CFkdFcBeHr zRtO_UDBYb-2TH(eqiO^~F+3I~06bjO*IN@x zu$uo1m?@!9wIBIyXbCSf%1?+>e1Eap1BtYit{{7=lYL3}F>hIbPl|a^l)%t4Y(eWqET13^pz48HpC1mG}B%!Otf0tpovMcZ( z@H93F%J0tl;`v@Bu=@<6lhJ)Mnq-Lb_{sOPe(lrkbX`nimo5_HRkx2|1%>j~v~ht^ zF|?Y`ImO~=a~eKcbpv8KD70o;=fh+LC!tk|EX_pexb$;jz`^PHcoW^FTDyd4UeXL9t};?3_*eY!29<+QJ zvCrH4>I>~5iE^P_`B1$t(0LVBuU3rP!H?ar z{&}(sIoN^HGL92%?-`>WUNFCE(~fPz5J+yoRKEQOBudc`LwM^l!p(M^Hb{Js30f#E zFM8#@QzM(6h5d1y&*P^Al#LI=kb}TJ{!WBMty!3SWSPEosBS#>61+{u>%}wKu=Ib+ z+co<3p_0s1vI5dtV^NhiBQAd<>&1TRdr-NW>pqn3;o`Nd4*=8zi7hYbVLbeiz>7q34##LFb@HeJgNiAEPGH##FG&=k%#rIGUyNn*ZB zus}NlF+}{H2xT=CKupx$$7PJp6TIc|+$J4uVa@lt1gs0;#8hTeGU0%j?$$At?+ 
zNSVuC!I7GO<;kpe2`3Ew6hT~aAws6Oh=g{SuKz|pOH^_~Ghds> zDEogWM%R#?+!G+_(x<9L_n<89@i4~UCKmZ{Z!5np7ae;l5m8451Qe7eD4yr;U=j^L zaTjb;L$7s;BR9a`bqJwRESNug2F zu;dg7~44|V4mg2HIzCW97?iee>}jKekPvc5%354f7p5l z9#NRE%eHOXwr$(CZJy?7+qP}nwr$(Cd(OSdOuo##zo3#zDph%E@3l?=CxjfDi40lC zyucs>9$f7E6vyoterq;K>h)igk-hgQZRKsb`h&XL;_|j-y^QL0hv0+e0|>x5wRGJ_ z(ni^Aa76i6r|gnOLy4P^Cfd|>H`;0wHU0pbXQav@uSFR|{lr8dtYDC=8a%Wa|G8ol zl8tcjujI*bJGU4atbmuI3`}CUPmk_Of7)X$5+}fuH>+9oUHL6$v2um$JeFgt$5@cS zZsv5_a*@Nnv57enUZFZE7%z(XYQC~6xm@mkR;<~35@n7&|6X2(d10k&K*mk@V{ zTQz1k%nhl^={EGmOocRAcCy86gB}h$Nfk{$oUoZZgtI764ip&lb5qIq$ znG;#<{fi3Cie=2%SKoCix7~aqp@g2NaG7!-F?hUWTjVz!d-LCuekSNTKA@LMK@Uv# zd-h^yL*+Cj=89_^2$9-=iWLLx1)%3J+7Qh;ev;DdtF9F-wGY_DaSFVua$wDx;VfX1 zQuYK9idB9fyOEtmseqbAuK%6e4@VISrApA|F35TD$1axbLJh&_1sQa?aqAgH>$PHg zGLzyx7q)2)Ox9eF`ormjE?wDN@PloG5l?f~b|6xYhqVSKtPb2epBpNN-3+(1vhc6! zfY$hDPJtqT<74t{lE0Dhu_68Mwdq@tSUPMSXv<3wD&qq&lUrPusf3WP50dzpU;;`d z@L$IjSkt1z;)S;@i}Z~l;nvdVs4as2GAylIgDfV#vKA&>-%lwQuF0P&fh_FRJ!Zw+rxHV_ zmMkkCh33in^H13S~I445(2GqP|{Q4HKcr(%Bz=LBoh~U;(!tTmjnN1Q{f@`2_}Otf!oYftGGH zRlyJ}K)p6i(qzK7CMGian*|fKo&CYau{kRsujVp~2hrH8;SY=D!m?9F# zl*VgBh{-c=JSj0^w)uxb#z3=iR#XU?4_5klpwRi?paASw-i3`2@yHkwkLqAznO?gy zw@u*RS93o27{*}Q3mj^ViFV}uzf3{<-@V}HZxa2>pRLS#WD()tcZ~kZb&E#BOaVnz z9=MT<|9o|Af+8yPrQc3em;~MkY`caRxg=K(#L} z_;u&xI2G%MG-qux=RTo;M0rIvvnx%gk$X0-QBHR(N2jEXROk_Sy6vWb5tUxi<1w%FXSgcbFs`$u1azW!cRw=AqUzK%+Lc@=}koXH|-+>XHFbcjk_RxA6LGoK$^S-GpQ2Gp#5 ztV_^Y2SD_PeLIO|Nz*qWJUASZ8!5xX8^5jCJ}~OP8U5=US{Sy=I#PYHVamS&BmOXq z2J2|f=D5>*LhdTv!!HIKP)_Y2rW*4wQrEO_#tq`*wQl%jp1&YnDSI9+LKfA>JSn<6 zOz%)CDsXO9f$e|X)(#CIc!#;6y^9*V`Z=`?5yvX_pYV`!OaiQuf=Kf0HmYfIlKDg8 zMHV=g@K&n`$HCma;{W+oDk~bpoX1qfilm(S$@Ps}N?@D#4zUHOj@9$hWUlVwM7a@!AN%`@9?)16hbRB9!RP%$JouAC*86ZtQ6vPrYgMvdVeiVN;j8vIzXRcupI@t2tO|_^aP8*NRDXQbnm>*L~?23k@kZzfaWr@o_gNC0} z#0T5k@YFYO9X*csQCs3S)|<4+J)Ie?7H7*i3m^Vi{c_Pkl4%^>koa_^l4U+=@-fT; z`8-5fgDST$`&IJHYVaCM{K5E8Z~y zP+%pJFe~Uu7^-oPThYF1TqyMy8<|uc_AZg&b{2YJpQ29+m_SEZQVple6RiTG#MO-} 
zZ7;{l-*y$(Z*UE&s}78nnMuW|3ZlG=y49U8^ieNWY1KBz1K~=wQJ&>L48_aor-&Ccy&Ho7YJxq2t?v!d?GCN_Totn59aC2a8X?OU zZ*|M}b$y7WJDk-TGe1T4mWN~_$kuvs`$v^P2Xv{#l@~b|6s6J&* z$VG~VGzi6^`BhStp`#g6doKalXHC%mk!@P$V{OtleMaM0lB#=H+e5-jftU(33pF> zNrdp}`@Az6kk`&%o+Rpf}273uZX6>hLhik`M7fw6+qrcgeMmD@V|WtBZo3 zQ5ZL@Tuvq5gPN4ZHL1TWhhRN<-fv+b5al9xL%FF84Y(4l%xIFv<*ssVYJn3*9aFu_ z?J}QCIds9L7~55}xSo<#5q)gVGRlSAR52ed^v?&#y|pMozJdgg_qneH!vbVPa#%c- zNtMK|TnUK)s}P4l-*13|w~;PGhk~3gUIXvy-NfT=*qfiS;e+Vw8(_~ zdw9{RFCKZc<5gNh2{&p}b$>E5sGlduQvd8i^r#T0Eq)q2dNKn(k+!n+wCUFNw|E6l zhTW0Q3NMQ6k?A&K1=({BHPQsmTI9MOV1|?(E|>KBUF%*$(7!x7n;fH_2hOGJYfN$H zj`%cEJ1qVinOrcBj0t}@tWTglMrm-n*~8l(R8-IoM@lPz9 zEQbMMQRErbj+`6SK1jbf@ad3+3J|@u=LSNNHJZ5RxtFT|DN1V90)($td2DYOrqQb= z60~$8*vPETFQur7P+v0nthw?asw9NGaNSp}VRX}HA9U%sJ6+CaSH7YJ^-j47bLWL7 zzbE*ga);m;zv>bOB_NJ)vsf9vAoavx?czU+KS<%9O=G&df1Y3J@E97_-@>ywTb*wD z_1Y(8n0iSTxM)D~NYkp92lq7PAI)=@{XnR%BdPEj{fcHt=+T9ppR_l-O4z67EXvHC z3O}9au)M*RQ)xg(m#ep@d}5~k`iv4~*YJ7EG>q31?T~(ql0q3z<6_a;A>g;0_RnnX zp{7i$++H%_6|fRT)kLn|1m(>zGdkFL>uV|yDk7UMjvJM`Y*U0!q$Ai<3PCu8md}>E z_qp6f1%F`QQS2ZBt$AK5y%&I-EeAOS|LaK zCgYyh@f-|8r_3z%dFQ&3rN_=?l_Q+T2#od`fLL9DRh}#pC754;*FA_QQQf#cioqSz zpTGv;zC!clNN|aya^vva-IRR$KMfY>6e2LPAj}M|v*DhM6ZiPldC;l1VAa@u$m!HvE%rjI5 zAl5%OQRN~*V{`T}dLxg~4kt>1Mti;Bpv-xj-vvD+y*%G6XNlSHUm?2GnW z6vEftYiZt0k`)+!!GH zs8bYOh$9(cV(N!Rj%=Km^#aqj?dOmP9%|2!u@@(lJF8vZZ;ksDPI<)D0YefSf74T? 
z&$+_Wb5YsJg=s#r;V;qWTs4zO#m0(e!u+GVPicW+W5Q$^_0`n#oFQct!cni0T$5M6 zzzeKGsec;e<<)C(z#tP7nJt6zB(=4^KTzCRf1qcFbxslc!c_R|btZ@1A=v^lP61Hfq=%rLcVf7ZxAiijp_r0G^f$(K<^uy}iaMO*;1kL+Q&ZtK*NGqM9GSWGs zLT%z&d9~}SWj73t4___k-kA#aJ-7WA()tuE6s|U_kfk4~A#dIP7)vx?dtww@T2}O?=yQ;rCg+hafhgORR5)f zfB6-cw?1p~Gi|$R-{X;nP7M$weLB9;;fp#WbK4Y#QK}@L>qkbJd~6cxd;IRECc~yq zXUV>vgxi%h#6w;qyO_dexkFhvFX>j#u39aYZy)9VQ?YInViIaDWw#|pi5m&DT&+M% zj%tsb7#-r1vhQC?LxkP+ms;sO9AcB6ACb?(M?}gtBX_pR9wV>c3wL=>YZ~H1EtB)U zXt^}}&mE*oseEgRtC5?p`Tv(THWVpuNP<9 zdDzq5>99@W#b=hA=uK{^9@QG)B0)ev^_|@E0GSZt#yU8-^kJonMwRei&_06|9nzot z;xxFyw(s?4i`q1oMgiTjLL!p8F~^W@RyyXwEW32+H46YS?_7P9_DzClWawu!Sc+s5 zH&`gW%gs0lDN8udr=Hetb>;w_T`5O%k;VCJ#W1G9|9}Xi-{JfIhbOI$AlzV_Q#_jZ zD8l(+l-|dJPq9lDRIQPrGU~quJQr&Pt);cj3_WWereL!Pj z@*Izyc95>odDx`vKs!&cdjZWNO|_2X7q;55&v{OY$&)!)Hxzz#rtP0#BghnnVAREs zSrK5}-}g^x0ZF`Ldeo<+%S5CL;jKkDG z=p+o95d3=JC(^H5vqA55vJmIr+lGI!8!ra6;B#-IQ5QrJN-CwOqfUG*e3`{Dh{;p* zQYhOWML1ivrw5P~;}M6XR;5W*PTvjdK^Tkh6ELU z?joWSg9UVV!+09fAot78lU)V6&4fPYVb~@xPQDvRIA~goAdhwNcGnlmn0|Cm6!MA0|#LuSnd0u(4)sie3bJ6A!mxD0U;o4=l-u zZB>s5KWk`Q1UZLqt^m~;)=|yd~(&r2*>RvbfubV7tnRT_vsTfBcSRy zfiQHYQ!RwfBE*iSpL?&VHh90G?UkQb4|aGMQykBz9O&wFw$XImk?GUR($L7{8fv?m z>Q}TC%4-iFA!0AMMHfy+evO43D(A6eDorXAex4(^X3N5M$<=qk8YaK+FN)O^x#L)3 zyZs?(d&Ozx7s+!Jgud|z);+)NJ^07tBICfD4MTG%MrbGU+!iKKBQEa1Y*zL|ooA2v zJ~3SlHrYE8uJMHV~ujDS~45TyE{6#>{lq{38b zRc4=#M=-`IVi@Z5#g27qdE<5_(eVv?rN5}%7P;ARln*54yCKPm>C64|H>2KB|Am7T zs+j2RppYW7;dp=3RB&(#Qye#Fu{5p1$Xkt6FcohT{8xQ0Y;U>gbsdrZHl$IW* zczzj}&?yU}NW7+{5XIBHL3LyVk`{T#U2k(#Tb78ts7e1-mrN-DhobC?ifHouMe;vJ z?lOCS{jJJAb2F`3p@Efx>5Jr#|4C#0+1%9t5?H5SlWuXXfpy@kt9y&k{#hXnS@g({ zTOZnfLKCZu5zPfNq4?;&ubej)uO`i3Rf>%j6-NLn>nigN(T~1*<{aaKT?m!Rjcuq| z!bx=gcF6mC-@Ubf&z=jVYsfhDu&YU6347WVS)|eRPQ@KCR2VOh09}Zl)J>w0rL8}$ zVi+@N(_FE~Wb=+MVu~<4nRhUfZXnef39An${+D?9R0mt>Qe+4uLAJ_~^l@{3u|1b= zwddqA{Q^!wz88WdWA*x1-9??@*B=Kf#Wak4*j%d=t8>Z_?)ylS%I|-P4T;0hJ6I*0 zbR=epwz+tq>dKPtqTQSkh!yr#3@!VS?fsfKMm*^zPv4v 
z^n-8i^$-|hY>1?Z^L2%K(T@g5$7gDNI$5pKxUDSt&_#^e;BSBPPhs=Cgp;UKq-?c1 zt7a_SbV~*oANr8DHRg!W%5WV)gh-i9i4>Hss{FM366by9G%SwQ`lBJt&V9ySz3o-D zI!>YRz5IQ5$=a0=w@b(6MT~vcH$zjGZ6tOCD3j3T`Zo%4#L*`(!7EvD$MV1s9x{5} zv6n{@0z`9Ns`F@lmKP;nC*vZ|vX+fm>g~GsZOgx=ek_?KbGA?)*bPiz{=@_XYavL)qCu zqjUot{Kyv9U0qPO%-_Ts^B)8ft;ufq=Djlqt3M@KRX>R+zKd`V6&f3?%_7LNaHCl$ zzgpk27l*~n&|}3ulr{3ZvfXR~&RWoPiuJ92aPAtd)b~j$h?8=%P&U^R7ydrLOKwH!(+Dg3Y}q^s{0sasgMrK)tKR^dQ7>Y>gN7Dq zoeG^zbK4Gq((vWN7-cQ(a zbF-btf8m=QOYv3R3Ma1F&!Pc6>cXqMq0ZG+vrlNuKs4yGu|k;;L>;}pWpXb=96C0X zMuvys;@M}cIr+N##vY=IpW?jCp9^&)LqW{Ev%m-3?ftzUC4kL4N%aJ_ zvwUY@qNK=o`vvTA%(Y#35|Y?4Yq{700|+}Q^x(Fiw#20B3Yuhl z+fRyhLorOB@(Y72;C?kioDR{jDj_oYQz~Ahxt~X1?O^0Q+i(mZRYW<*wlYbk^XPv^ z-y1j|?^=cM70;w>$hgGp)XE6)*A>prlPK?qTJoo zIQ8xmHrEyv63gsRz~DW0FoF@Z9KXKM49j$EqUMQ2t;3mzLlX(p#4u7X{q$Ltd)E`Hqn~Miq z`~q3o?agiqsfntN88eSF6*@?hlUz;pF*l1XT2StaAV7M)Zo#P^hAbg5CemoGcgI(} zWOe*ld?NPiI-q7_0z}`8i^naRphanE0B*$S!LbpgB|DepxiArZX}%HyK88Ovh#Q@vn*M|h&RSh;`=ACoQB@)(|2KWYx#1D(>x|#TUJTQr{+J;-0{eOg7#OonqNk~C<~5(%Hx^uzbkLw9$yioIn0 zC|7I(nyV(QoTbpp{`J*Urq{LtpxvO|^nCO+d>gb2QdGpSrgNQ7Ctu?l)GPBH4c<^g z4C02(j;VA?Y-ZO?x?~gCMVXM8MRL%Ww%o^j-5UejZuS*&+ismViRMMm5&>iOZGkWS zo*K3Pw43sK01gpSjzY;TcernPGn*&eCJ5o&ADbd=Exg!Y6ghcx*gpg$(0`W5+TBtW zM#pKbyat0Gf?vR5+CmilksSN8-7fA{gB22a(zPb0$sv9Mykm2EL);N;DoG zYFaGMKCg}uZBFx{Tc7ehgyo2-U>|>O=U)BqKK1HNG#?1T^eWy|rf!;Rb({ezTjs9+h?|K4UKc{>?@@5?PBA5lsw} z#crSCJFxTS?O#nM=5o_ZUU?k1a9|O<0sv2OeR_E_WN*h#eC>zHwlAD6GjUkchX|!2 zm-4DMCz*obYLN6Qd^Oz&qAQRnOu4?H;S@t&k}@2Ip!uvSwLt`Si?=|56&ZVgR47SI z7fKdZU#TDoq^_v}r$Z0((oqF8n+6vx=m-!(mq`W93veGtU()TLqI9&j!U#$jO^nlU z!XK};Sb@`OL~$sOSqp@9cmRech__%a1-7XEIq}4tZFM8o0)bN!1*W7y*PnOQ*L_~E z(Mk~Al0ISY){jkw(fI-0gLVxxxo`3oES@VWACIDNcR}?0X7?zB*;`pvbD@mH-|k0JOTJ48 z5#!f_ZA&e3KbY?9-!y()Iu6%7ImU$`H8`^x71>_`blJ~R zbPzA!i0>wm+S|gC$ozK)9QE0;qHX+~rjjAa^sN!=sr}{>UFC@h4^tFH*qr@Aj23!8 z87cNEjB(}CZ@~QazcAts3($nGCBeFE`xAgwQvs#}w;pdbeq?5vaaNq>NpQD|zso(= zAc1i43WNbODH9+STO+IpNIWHH1R)_nsHV>ag-rUar0Rf>AX}k}ZT{i;lIpBlGD19Y 
zfQcSw^|Rs+Nat#n4Z*wZMNNyygs|$L_^q|)4R0XTQdHTYpnpvlx_fY9o799F`+2}{ zwFJYKp~sXw+rD8i?pvEiUUTYzNab%m>n$fgwud^gQI*Q@d%HGCMm+*n+%}~_1gD44 zYK6{yrGa>m`vLg!W48B@+;LUxo!O*fs)Ri00zTdC&m5A{3Be+5H=w-QioTZ9j=^A>-QGxtsWeHzcL8vdQh(i zame^#9~{@U64*V2EUiKw0j3w_ril&nF$abFT+}rME)a1Fr8gX+6`J^AK|bUcHXxEd zbb^!;w$zPo?pIg~H>aus05v!)x|LB!Ykhj0<8gJ62emh&eH#5?Q=1$6Ev9Nvk$TL)Bx;V}XC~DW0)W6whlXZ1AR4Zh9xmK-hOxYW* zt+TVy7N$icjK;B@cQgZDOGJGXBeUpQ9ujkL^i38_1hBTTgQL-bAgY#{;wX`TS3NOI z1bh<84-AZ7?SLv=)FuyL{e3S5Kidct?53CxD!^KywJ@I%8LTW=&^+(}Bb{#!T76{$ z_GJZ51R?ivUgVFLkcn`hcV_Sp3HBkthj7p~r-b`Qbrd&4~J5Sy4Vq;Ig zG;sw|p!ScWz%IV4e0<{dx3dBmnSUxz`Uc&GrBrF8|EE|jN2*qRpSWXiYbT&qsY^W$ z{SrLxlNMoWhnmE1sTVWPa0K8yF|%MDSj`#F-I^%)d2ji#0g-nl(;f>|PH1M8UX0#C zpE4T4Eu3hKDZ@y|$#Wh??p`%X{Z0cENw+|=-%ilHT1& zsuzxwy8N@v<~RLy1Q6lfX9xG?o@4}e7ELa@c5_V(O}@8l^0aED0r012%LXGFp&)W7 z#P!I{+Y}V%QBjS%47jUNnZ*Vw@ymu<_>%|vk0f1)msSc|U#JK`$#YUD?~d z)L%^}Eaxq0LS3J}fWaZ0gpLV4HULwPaOnxUy%j9X`v_Uc+>MkV!JzDZ9l%-sYz$y_%_=W%>Q$+0++ z4L&{qvQa$1R;>=a=5fHC$9+@5g)3A;A#?)dEgCv7m1tF7DZ5RQH-eZRWJ0;{J%a2N zzeoqbO|QmSx+S&9C6jT3&4-B$693cp>!gJX>eqA)rQ%4KkAZa9!h9q$g=Z@{gD3CY z@cI5Ft7isbvR44QKr|@2?LYs4m#T&{RFGXcuo>Ly>hIFcPaj4FNZUvRgHI6LgB^`j ze%xzugbYGH#8}U3)ln{A`UbaSoxMV96o#O_1RvT>l2|7Yn96F?Vg;S z75AWFD5V=9Z-4?u$d2OxdxUy)Aqt3H-r*Y{7{zSiihEte(OYU$0>emewxFKNS008- zrktun8Uvb(ljqTa#g&#-!t{TlO=At&ss3$he@V4X=JlsPCqmH5_mLV_!w#aBmAh1)gSTIRPPlz4b`$1U}_Bb;k;T{2@hk3$aC6z~LWG&7J9A2skUe3+=gGsCRA*wN$GDeU#_++7SJyB#RfVLwSC6MxR+7(Zd8WgD0MgLpX7|L<*o{qJb(X-k=q_NVof6QBNo5IBL{90rk#D}Nj1t(h`~L` zU{MQcn2V4g`AjsUw-Cnzp}!{0#LX3nij31XO}6?VT;(Dk^o@+k9U|XT((800EV9he zUD?P!_J^AtRqTRYdTm#RaAPjzTIb8nE4q49DAduWUVwUX}dh-ay zZeiZ|!nB6ky4s^!8oUwEx0|}@J9Z5Cw0y0{JcexF+$w6kLi!JMoP|jieHVVwnRfX0 zD6{IADAQ|R;PUo*F%?`4$C!q|WVzhFLC1deL@TN3VgQV8UxcG6xFw9?^PxaQMI-K& z`*)oz+cS4;srbnHZ!EcRwQ@;6(7huQ#CS%_R9z>ZLMgNrM^8n*YYD`cq6;EOXUfGJ zqZ~2OkouKb%uB<6XgMw=O;P^Dw2dcgZ}A}bveH!UbiEhQ=SqX&_GfNhP4F7chNw~C zxLL$~IDTrS@l}5Ej7Bz+3o%+<=eBAJjaz1rVlHQ*y>F5yf^GxQD?D0--w8l}3>GsV 
z$qP=%twN2PBs*#X`zo=&lQUI76*Y6Qw4RQ?*v`9x64(sLXy^*|7tg9 z6Fl4oLaXpC<(M>8v1jejxih0ua#HZ}iZJSuDs{tLFaPk4{U^tFbW27Xl%8J*Q-fAC zDKm=eFL)_wV=sn&?m?{LGrp6?taPMJe5=m@BJbofu`)o^9)AdR9S(z*HYF;+``7}zNTb)+_T2l8_XlKgORlS#n1_cA?x&v#dEOo}WEI(ya{mJzqd_6hh ze<3NBcGr5jNFv8?!Nu4o!J9z4Y`ulW)kt3k8yLC;Dwo#_XM?|M;$=4*Vjo*MF#i-3qNvb5Fm8=6}o4>B3kY>g`X82DGt zy~y@w?z8z#5g9sf=j`s4BY%k^yvJQdmp!Z`%}gq|N_3xYLfSLGCe;a&$KZFl3}v1^ zR65KHOxi>v|6M+Yfc)}7vV5s)clm-`D~9dz9xDXZJ97plM-*|4l@Np^2E`9m$N1>CINHk&raDE`DPP6V zH}=>{n{5HZL#MW|OQ2mJB^Q3e z@Scp;py>hVsuevh_cvzm(%G7LvH-sz8JGm_p&$#R#v(>ko^&H^>}(~=Zy#&hb}BM# z)L&~o>cbgh6O38~e%*RFmlf=f?wMSPe1tm;c(ul!Z8?M3OxHl-4CObIhrF~DwBp(-DduVA6$1E z0o9^NXCW{S8cWwI{2xw4>kLM90JH4u2;MY(ksSkvv!hyb5oLtMDWC8T{~J3oY7Fb$ z&7=>Z&$al%Ywp@+hRqBs&P~%}i*2rmZBR&9s#W`7c0QVG5qoV#28?6}6wge{DQ6OQ z{wzlAj0ye~vMKQe@hQXqb93WzL0DDgWR+v`pn8*p-y$SOXYX8J?OAVCw=;qZZHeav z8A1I7Q^fyqM0fwrV0T_)@l&&7Q_-!&t`p*6anTQ}CJ7CKAR_PMOi#@HvKa;i?hHjW z{p&N#eqh($o!-Jn7m>u1G(3k$Ghi)B5#!*V+D)EpQ5d@W^{jOP=hZ_zYn$MuS?uRc z{FkFi?dI4B!7}aj^W&9~8^1WbZ`-D7Twwo)(Rcyxh&De18Yicml(f_|2E`wx1|{G# z|I=IKt6AcP?%9@4{eduVfh<`gidwY4bFO6Z{!7;2QUZ3g-Dcuw$1#LlGUR$;edhieo}%QlD&oV0jEc zm%?#tJto_L43Xv?1OIX1g*}fbBbX+pz?l+$=9((|nd803`_8*8lP`rRQpIOBTvgy) z?44-+Vn}Ori9A3E*MUL^9tzPYrOn@&jUD>^t|yz^6CbN-&T)~EX%Ft^j!h_{HCQC3jyi!)2CU&vnmmC9K<<~q@(Lr?^6t2e+nx`g!5rhahw zDz$w+1>|W$3=Y5RNAi9Y{RI{Pt^hN=3$%UlLAkF5xxIx;qPK`zuVrBip}$F>xe@In z*3`6hSl&W9^Ve{_EO7(SZKBn)KJZ!8AC%e@b71UE*0Nh$VH5aoASw94l5o7c_6GCl zuM~%Q9(y*@ay?;?@BUBP3FJxlyIgyGEL$V{NP-?rb9V|)Yy3e}voyP@o-nfj^P3qH zfXw1sL@a^1Q#si_r?0o`@@?O4kZ#P&l2}bI&>a1XatT9 zi*XEO0kT4`$p!kf*8Gh>_RkDT;Ie*YB)>^f&569Poo0;S#v2|!BJUEZ^@)OD@WtCw zEG>@5ivd0g*-+Yob9Mi^2Lda@ZM)@m0HW$NMr=Vi4v!K^s%Mgk>WMQM79`VE9UWIP z^K-rOdRqV#XK>Wev@nU2&w8@&hCL}Im!HxCg*x3V0~lS)HTzXt@i;3wH!cV3->v zxOVnb0+fE=3W4o?L#qQ0!5L!zVlkf)NoFZ9u59WKkzYT!+QtykU8{&b_sE)dbwAwi0VZs_T-(G zSkM1TC)%!TTun1HP1po$q`<{r;v`D%E+2@Ygx zu&F;V;L!JwTHq^#MQ~MRE%fGv@awrv(yE>)yrHKme&$Em*sxsUoxBX?g_?dsD?d>O zxct5m^q~ylJg=VA6#Misxvx-saV?W29y~#$FJ+lY_rEav(B- 
zo%}hDZZ}&o73k)h)hPcJ4{RH12vK6;3=9axB1x#CyPn-_zARz;Hjrjwgxye(&?1+e z{>HpYwHNj*+tk=vCz7pjp~IZ_H}d~dg@SAsz|NmdT2KYPGj4r*2nQ5(lR%z>h;D3>R5c0|9PY!Xd`i$CuAu}!(Ro1_xX6dGh@=M1`Ec4`zWxO!)FR$$)#%(>rMvc zW-!C6dHVg}1b~ZTtcqDTwBzp}x^bJZ7B4az`eIINkTbW#2L@?;yqDy_#1AxHEIuec zt$m3HtiP_c`FUzBA@F4U+LXsg-rw7K!l~<4c>t5dnY_lqI33oA8!YbQGVVh5&0e))Wag&;&Y&cw3?3*4F`12ZpfoL`x`;dxfzlqVCLfN(x^wFkYn6tMHy zuDr!_%_$H-fv;XIUpFBc&N~ErbHY*`@e%=s(b6d3VP~ocO03O-X$A|P>o_kKWkaM_ z6MQ_Q$t(Z0N~U#t-osYpWFxK=FPUcs1}@fDLVjpg@J#}ze>`q8Sprk{y@!Ap+WDRW zr#4ZRE*uqyYTZkXn`Df57&D%$Uaf@&STb-VOKqOsN*DQQmZWoFdXJ*ZjY8253t4bE zPEq=#Lysh%E=PN|%gweavLV)qd4X38LfY0g4!zfY|MyD6f2P-`i<_Y;;u51SV-$l4 zfRoUtH@72DbBrBueaWKpUCbRxu3djFXr!_qRk!$ID z@F33Gt0~DXsv25WJjx~H@neQl)mEew%7DSzs$>M;$Uk3z^Kiejx0r8J+L=GW)Wl0E z;KtC855Xp5br}e-`>jA@`9Gl5o4LVbP%QW+c0uq{}&P?jr36)R|cNd?-gTaHT8~$e*eJYF3oRQQlsZkg~~+!-2NJG8+4< zDy@pISB1bAb-{W-pOh*5ht`|zGlm-dhi@#hL^za>?LZf$R64<|&+&mC;wBaiaal#U zq_6#eVMbFyKCI>D2p;(fPYa#)&NqyCfzg$JYCHg(&|#A?m)1 zMmj8%A2TzdqC?(Lzs6~cX@K~nK@=v8BXSw1q~1;Lz!f=74Q z=*>%I_!$*?r|>0DXs1Mdqf=>cp;zgX?G7#t1LNB28OAiF`cO~5p^DvwL+e03Dzr~P z@!^;zN5^pc%T6mT4E4cCv~dVQxttxfst7T?JOpKL3SsSc zVY)%3N6T8AJ{*(Pc5z++CS|i&(2Z|^5+i*X9uH`aFUDSDNMifEM;1lKC0VIa5!_n2Hkajhhk1Ly{PdH>D*LIm?hZg$ zbF8}b3%T`Q6^}do#9QxcBiR61>VfuoBjLOHo7zq-Iz^oO1g?0S#)n}2!{Rb&{kMJa zOdo>nkshhz;ex829n_ZF&${#p;$EI)ZQMBm)!LLQ{i#G`-u)YNSh!{TNuEr%zt0;p z+0Y{nx?*^zlX1koDLaf!&(kwP&DZ}czs$H%^-70GBPlRb{#yMUhZPw*DXeOMYZoB} z823Q{3RDi^AU3k1mYj{L|95fz78DHEulaDyW31E`O%sd#LhPdaIM9LLw#{NZ8~yIe zhygljUaL9+TSfCh`Gdjf+ljMQeKx9pM+!S{T@b|0yI_qX8hn!6h#!)zeuWe2|9jh; z?SgTfPn@lkrKZsJ%xn|xJW)YSI%&Xh+>+yfXTrF7Oamcw7v0%Cd;WaDcc{7WU>Fp2 z5T%351@qr-olC&OsqT~Q?^4mMOBC&F&%BsiedB^K2u}2%ql+IPh9FDy7Wc$=!4_+~dA-^GU+C`8Da6i{8F0n}5N-DW zBywo=kZM&GyJGw-E~P(@MxWd~*eE2_VrhLu2pJ!@R^j*tzf_rEM$B5Y?PGAQz2*qm z_zLis)637UT)zpGNc*vJe;t(3?CUm}^d0xHoPn^^ls&cDM^uCq)|pUlK*KlL`DNQw zYSP`CZaJ@X`kCit=)`1@pNDXG^!@DGl4L!E2xwR^j&P%KSfT zp|@B!2)T^u7-3dJYRoC4a-R{;cCg4GvN^and&$A9?@}hnfbxBV!JZG0%wh!}J+>{w 
z&-5pVY-FmY$aDbPS`vfc?iE~@h3;E}%{hR-&A@%C&KKe5uoB=YJi+|YGQ#%8`pTF% zHce-t6my&NAt$m?E*V~)`rCB8RFIran_fk?KngpK4A+J%f{HX2tzOm_687$-Z9D8(2d*d^Zd`Fe-cN4VI0@C zy=D@fS8el&J>=q``L4aCA)P$7pYDS7jiu43}>b~DP@ZqfmM8za>HeytB@kSWeo=^LLvEK1Qsw8!EgP&EnablapXG}+Kt6R*2$vOAP#OD!9vTCZK%mAQZ zN#ZM!znZ}-|9*oI4)%ipo)nM{lN{(y(s`CVVVJj7WTi-`u=;p{J&dp zy(qX!rbn%Mfah;aQ^)rA6d1O<7g?e5-b0;#wJN=K_qhJbK-G7y{jc`kA-WPK{MU_b z+qP}(*mgSJvC*+@+qTg`cWgU5wvF!KeE)mbU3YlqXPuf>jov|3y{qc?JeN%;jZdJp z_|4Eiyp7t*k@9h)U`kL#hLs(m3Gs{xSszYj;jh2B_%3qFWeDGI-t7Nf<;me9@I^8m@~Wq&NlNhH0c(=brrq-SYo$ zb^f(*qHbGW38q<>B`g}|sv{E$UAc`Gr)=o<19O8+Y&QmV2Fu7#w_mzgmFu)k7Rom9D;e6zdZ!DVB!+EbeuCm8yta&0ES&rw>F~}V^ z5~PfD2&u#wD`4@*FG9r@9Y|;=lIyS6_tk-)y6axZI`kCgGCTprH2S}qQ=nD*hJ(Ph zz1X1;eb?3$wj{@tSSSFuZ{G9mCQ_B!ig?Q^v7N;9;TJ0chx21WPIOD6-mT-AlJ_dz z+9eb7xV=g$=p$belBYS$JcV4Oxa`ty76A^8iQu5>kA5IN63mZVPu_!+dc?7hvGHQq z)j4%K2Wv&AO3E-ZD7pa#PlL*`fLpcXTCtpKF+z4DwwNY{@6W?lg{$k2d&-liUugom z(D@#wNpF)mM|<|>NxN7|Q+Q*F8BZ(f>w=Gu3{>Db<@$5t={yl}1{z&C7hl-lP zok-te$K?>epePkOu7#d1$wKwg$Ge?g5o57V#f;B(Pq#4|Y(W3CE>o6bRiVHBKdUbA zk*!6^!;BU=GS)=I+#OMVTR!=Kt%X&<-jhnLn5C_YtX`J^#YmQ#K|_hR5f!{ind`U4o~E_|W2Vaj9U6L&cJdzIxAKauA= zxMTM=79D2eh=p+8Ige!;wZaF~Q(fl&w}alhgqP`MJbbIME2pFd2qC&V&HT2S6tYp( z3zr<5hEOB_k}o=%7N`k)ymsEm6OmI;V!`v3$p`<=>42`{kd>cS&~oFes03+H1}stP zw%Wq$wP9>8(nQ*wXLzfx@2|_Vl6&Oi+Bsnz0kB6YDOI|;Gg zRO#~@)ff|`8xbZRdgQ7BrqsTuqzt#`ucQ=->@Lrx>1TsC&m-J!{LUgM1+>O(WA&tA zxBk+7kb)lXBZY66;$=>5Y=#@_wFc2C|KqX;xY-HsxW#aeTUL~Wn~$fmA8z$yMmd{9 z`ru!KhsxNWa&;4VvB02_+qD5}h6$cr1NC?+aCj(flFN-r-Y|!fz=9nq@BNKN+qXmm zcjcX#?8D9xcP*jysfUyKH8x49Kkr2=A6J1G4CW?$&T>UVdgO`fHWMXL2jKFv)gDZ# z(GiJ&a|UH@RIN%Ezv;n$LaY>WPX&rBLod*E>Mly9`D@VeJf^Ep%7>xMmsPyIV{~mz zw=Npn$%?sR+gY(~u2?I!ZQD*(Y}>YN+ji#3`+j?$v(Jy)+HLp8ouiF5s-Eig6h`l} z%~@mg>Qqj@x5=JPFApIJN=2t|@QGicxMV8UBRrnJ93 zrac;u0^T?i^Bbf1i8QVytupt(gBMnu=gkth!^ABkruefak@ba6^E!?hdA7FLKJs>= zu!zn9eMe)&9nV}B+rTeX65;2_a*m)dR|JL`J89q?x!(&R-uOhNeEPLN@6iBsA3>N+ zLiDqBsY>NsT-Dh;$4P(1orkrh(pnu!>33Xrwwn$t1h=&U<-e#-F4=Yj`fV7%5!=r8Sv2V799YRW9p` 
zc2e52rWFvD6vA*n{!)I`3lK5tiJNOPaNpl2I;P3_sv6tgIx>eT$lQ^2w9bVwiuZ8D znw+J3u-ZAh(X*h#TRQg*{Tk2uMC*&{!+0gkHB?&suB5ABL~KMU&D z;fHe?bglTF3l^r*Y*~l?5k*uuq_%9b3WeZ@U3u1Enq|A#{!l`g?tLudwMc1csab3>Xlmd zl)=C=4I9iWE|l|Kf9uF5-{tpm9o<#sInH1g3?-O+U|Zal+I6L8uN#yAcn08n67)sa zNK`q~+pgID1dWk0z2;gQGQ9Al9B1!7_;|Qgh+6ku(OuIN9vzG;`1T+_U-{(81=wHQ} zhSaM<{oK#^hliD4ANlNkYFPIl&r{mW!Sp^}gdWMm+_`>&v$Y^0!Y?^!2*q77~ikeXNEe#q~gnS=soe?oFOXNlrE zD*3ruZT#e-zg4lma{YOK zRNOA{(j9u6x)O;V7Xpsw#{0wH9)e5Qr=2Qa=+0V{k;vq9uw1{Z*DI-W zriqMMs|HVn2Qu?*eBITXd=0_+)rD99Rknd;pwSC^YOB#fU5D(Al6E&GZ-tk~Zp@wM zm`gZ>l}b?G>m=9c)J=Emo-Zz{BGA_DPOvZc;RvH!-4

(eL`-^2Dn`+m(sr0_!vEq;=Yhilj zVTgH!bmB9Dq2c2xdnTe^ItQI6C3mLjVxa={5Jt`GZ;_&3bGakj2p>o*^(D^6$l?n* zJDC|62Ks|NK>Vv;_JY^w{?!EaYlJ~si#dNdz1j@gmm80dB$!Xveg)+vUK(y5ABn#D zr?j2=XmySYQtNiB*KuHvqkGy=JK1O}%$WX8O!G0{@=yT_rd~s*a4&=*c2z$qjVJby zIN;o0^KsUjACY)|Zt3Z^e&j``@eB>*#MKi~h5-G}K(2&Y+d_F3yu_Q z&O81Vz`JShV=zL)eREb_&?3LrnuzI_T`~8~!)ovuQ}1uqim~uHB-D^`!!5GSx1Gcy zPA|DEv5gQw9TNg(%8aX9ipn^n7|Nj0YRgg-WHo(x1%L3TqNQ}dp#SeB;5DXvw^Z_r zYc)slPp~PaXCf^9vT)IQhKYb{K5SU4bbXT&)o=!&B%)Q`_AD57qx^2`lPmL>*1xhO z%tSJJxFQG}r!d@?BO3{}9GK@->{dm$(plG={r0o!I@u z{XM2qrsvUyelE(Dn7R02Up15%ZfSy{0~#_KmpUH9de`@Zohytz>$rHNNrB2Bp)SpI zWsc-R&~Ds-COIueVTMm zJ3d#7lC-zKAXZ#?;$a9+t*aNlmh5SItTUJpdW@njaP%-Dc9Y(s6|qtQw=Bh}-0#pa z&4CU$>fSoj)i6Ml(};35397}AUGUq13o)s1z}{N?np@*7%_%GyQYlO$LuF;;OhjUw=8w}mcXc!ysH=2rNYKm2qnwyz3&-v5%H>s zB^`?KlqU_wgoUw7JqF=CO2U_>uqLt-PrFY%d=$&{r&Q{*8ach46)GZ-Tl+=CaZP*1 z_g;mRBuQ>VwHCRPXc8jfDK!%9V~_QkCaI{q&rP(#&dQv_31W11lZTj|hHbK;MW4e$ zgu?@bhZitD2JFGcyv-F5p`9DLU)NPoaS(5HdHgx5;|oA|JL{5# z8qzf6`=#yZScX7OsL02;a}&>qZII?xFC<>)WxVBHPrB~+u-sNCSK9ZJwe1 zY~xe)EVx(1wwh{T5dobtD8jasUXZMxNigA9E1z&+ZqeB2h&I{04fh-`b5B2OB^xSX z{SDK8ciwfJi3E3P2&{gg3sbruk=Q2LRj6!JSCUkmDtlr2p_P3_9htdRc=RCVLd|!c z2I6pg&+-*mG|3GcMfl{tybN-5VDj55valWF!;(=tgSXR_7(Jz_`b3V`T{N4=L9(ef zbF;;Y0A(PnAs#=L41^`qO)Jl65xirbR_W1CJGYqpF%kBS)Fm^Y01!uLJ4FJyypO-= z*p()p6TJP4#MFU46CyK|lu~D$~PO)lg7g~(>_FE_>U)WFI9oZ#U4s-}QqOWmn50_XgTLFuh zC_80A#y^hrY7A>ujj`Yp^EcvIfL$^Ie`a7o9YWiuc>i!VD6<02^Ha(yJMv@4jKY;e z+}9NpV6%-5N`Ihs(g*%5_$eSWI@+?K3IO>bQ2w&1vCtd&Gr~vHQ{E_fz59ka8rKiI z1Yaj^KY6Z6!sjxQ6#PPal#e6c&^=w@Ka`@QJp)#LcqCinlDZh%J};E554ED+>#&Y$ z3cE;aQctG7N!)L?o(SktS>b(1sRNrA_hdhCO#+}&*$sT54Y#PaQ@vslWF{RWY!v^t zL`+6e_ZT_Zij9|*hDWJU{?Nv$FIC-SF44|b0@PUN62oUpoLL~B6El;@P7U$9rdF3~ z(0S`!ac9c6{I*LyXa>sYY`bWzkcS^ef%7wHI;`{&Eb=_(z|(*(DbF|)lX1I@umR`~ zj^Bo(=}KO}FpUb>O+vvnpY2{bJvKnL5B}La;5zh4H*!c{h~8u6veBBuhGi1WFF<=L zoE+}hUjD`SCn;MF;bz2RWP6R^e8{OiL}Nm)sT9&argQX%wQ?Xa96-JPMaZKMzk5CM zPN`NzL8zp15fWF(e>=4qhDS2RgE|}A)I)!y{7kMfup9mYqmRnxIhf(gZHEXK>HI5? 
z*CTDOIO9{&D}y&iaE-`l1cpr`?u~H0&sPbD%6BptAub+_#}Bpt*L^VcAM?g-gK8RO zR7)4LJc0N(tTzfAN1eKs^;hiD5iG3bY)qY^67;ELZ9F%k4n!AHac*hQggymQNq&2O z#LlS9D6!MZ{8lpa`~toY!MMK>o#=@ z(6Pq_qTo`jo<_q(Q{E?8u6g6sEigw$+Yd|@behIUM!`BQCcfQdM+pXlOFK2APpN&2 zcGzkJu+yL~6(32xfNZ;LuMSrjNKUl|n7I^*MBV7LsFw`Und^Jz%9TLt$}8lTdCUI3 z^q5Y+yr>Pq`age&n;NG1UL$3KJJBI^9F&vBuRi^_ZRNiovDEC}{ML*V#__VW_&{Kf zL(pWg9^S)UxsYmcyZCKxUxfy7aB~lR*s0Pf6=l(_d#DK&ZC1)@>iX?>JQaF z^aB%`9ELb&6`@a()<8@8-uBIj!IN|*kM`l{^#))w6VaVDX)N<)&&VUn$3rRDZ0XSk zXh9q!TG?G42X}1==QYG-sl{GQw`XYWA*~J6huJj@;APxebtv8=Bdz-E2}JnFZXFU} zIrS`Lallw+A~!%uz&qLC%OFB!JEjN%#c_gikD@ZSp3Zo!lWK)TRop0gDbmXHBb4qF zDqxh+1;9o+NcDIo5xZA9Tlp)l0K{><9J=`l<>aoeS@gdWMWu0?$uAa}Ap+G3DWgq> zi1$A+$do|duagGvAXvT#57yv*5N1V5a8eWv(lnx2D;t#m+1Hw`04+wzd$oIHvsHaJ zBTnKM9=(~y66|>KFCBq_e;40&+?lu1*0!LMB@lKWCoH;ERPv?QK^}R9V3uBt5`b{b z%rZG_fO*+dZMIwpctv2Rc4=qs`1O9pATbh8n1BK|KTSNm*T{LWiZBP z1#eAF@NG`h1BJLA8Lm&(f~KM`QOT#R~y$-Y?NUBG<(qw2bi*pCOexNF@$j(A(gqQN^VrxH;Q*6e$*Cwn=vl zl6dcp^TC)82)4{g+mly)C&y+7X0Q{AF3H~>(Y3vOA2FQH#km08fw~$%!1>)Tgx|Xpr{sV$A})G3D_s7mN9{tq(}>01B|*n)rSrz}EQGoD z6rE`%i>|oPFK+CDWl0Zmo^dIV0OL^w6NQaXPZNwNgr#wE=G<#mAf?S-vuwe?*{!qs zRfb+*kRd~E9G@8yM&p49Y#>drKUCp)v z*($wo4zQ!GT$0+YYgt>}>k$&Bb)y-+c3|7k{o3ij@ zb3$0BP;V3jx=sk@LR(F=%}2|p)$2Lkbv&p`w;>}>e-po0L@e7g*xeFNGYaSDEO+pC zKH6;xdCE`dVFZTd%Uz{+;;&Ey401;1@U>Yh`}1GuA|*yyw&xCy-5*&KN*V`$Z4?4g zzaRRwX3}VmmY`{V2cncP)ZA8zQ)+7qH{^f#1CGxj&HaPhc9(D*j!#%EAD@Bx#qv0$ zjF@<~_7DUEZYU2OC=0$t6dI*NsKDz<0DlO|3?*UJ)eiv`KoHhZcSw-?(ES5K%|eTni)(wzpgOq6e7}dl zD3^POManp)n zDh}a=-z6OUHpk0F5A%NGWo$L*cg4*UH42SLS#gYF{v0%HlZ8*sL&jf4^rE^`dX~)o zADU%G{O^3SvNFA!d9(d-&hMwmt>LP>AH8Z`)!640w0@m2KPgk8&M?v#8q|6axoeRl z{dPI^Ve1*YGCN!sKKvHk_9ONUF#@WtU^+p{C#34pj|;3>bN4Hp$ZLZ%Zur>FSEt9R zo&E@Ab2)|Gi6opqFn3 ze#R7d#s?<*C!}okDLX(&?2EIa0(qa|CV9@P-IN=1Z!H zMzCSe)sJz(MC0A~iQkUwb(hNf^-N#&IwN@=BaH4Ha(oSq zmk8XLPj^ER^RuGuEj%bxH3>mdj74~JxisOS%PYRK4C%CRa5%60!N(qNYZ+gbJ`}t3 zmb(_@xU2%Hr?hn_N<{E`-CTTs`X=B!P>?wy2V+k2GGjt^?@ye?*t5v!5R^hs`Qr0= zn^D92O^;RWVT%QhD2B+icc 
zUvm2yahp0pfsFG%*|3&L$wlR*(7YTpnlIX?&^wFam!*MU;w((bb!86jX^a#QdyoyU z_+K%|YcsN^WYmRQc?5J)!S<4CGTx=QwPI++UI<9`RI48B_9|U*Uv8#;ymtS_d9!jI z?!$>^W@lz979j{cW3slzuimu$3vWrgMgoXHbeR>%1!hK}IqH`uJV3{u!lm9hUO4@! z9yAEZ=&K4KZc+j&I1XU6K;ADd?1WO+7vc=UI#2PPkm@=KD-gQ@X2?t8RHbg1t;i{e zLUVZ+X8Zwz-~zo{O*+XV93F!+U0H^rae`}qfFH#ECFO*b3h#_KWC)&q{T6T?m$~zL z)Vd8@nsE9fX4OyOuDmJ5_}v2VbizgHsZ_I90sVf4Eb_*^!zlt?)*>OJ2hvw-R7Yd>$p3SG{-g#5=T%3d->EVd+D7CuZ~zxx1#vktlQcDIou35R zY1pV$^WC`GLHRq(S>V{G?!kM)@UvPP8(|^ys`5-|qF;DMr&Fcm@Z1q}!Z2|xT{?&u z+)D=+&wj{Y^7dP#ryv_8)Mz2tPxhZKHIe#rx0)M(G)9IH-VKDw0zaFKX`iIkjkYkH zZSF~#natUx-+zPUQ;6)Jxa{TWeUL^Y%{ssqbc0o580%)!&- znj1tLasrXZd||{B*b9YB7V}aw!}o6BUL=A*lHZ~wee0;Lfe(a=@&9ezS7(TuL7CD9 z<<~LZICaTAOFObp0HrPa6S6ibUKh-sV^wElTO@g|K0LeQ!K2%LYYR)5Y85|V5Z2u- zii!MzK($`~i?xNRrdszgRH-Ip0lv)jhq)~gWhn%+VMG!FJ?l2)t31jTmSXym;Ommb_&$!5^P|+9&414lDKtQo-%(oi>hl}j^Y!I8OwObG@f4YN;`65L`jDt zEpYcG{V-YE{iXN}m0>L)iE^ z)-TK6MB>HXWnw-2dD(KdSejAU`*pp4gWmg#12TVp3nUeyir6|9HUM>HWuWBUP4~!A z>{2*B72chfnYu}82mG_9>&~P;3_dp910&6ae;7Dd5bjpg%HMX5J5Dvp1{|>lVlE}V zgjW5A75za-ReC&GiZ{t}8u91XLAv)r2WNWmki=hcj#A#0;sim7%~gLFl10@c#9NX% z5#b`{v(pp{Nnzxo8!pi;`8K4nUPPlnj8CYVQ#7$PPANt&sdov(dEV6kWSd0HiHNgZ zj&0k`YS|~d<~LKo0gj7cz94avrMQ-fx2X+`%G~Q(6bo&cy|DnO3 zr`?2m30!c&*Qbq0RQQmx=pJlAE0lz8i1K>z)RD_f`h7}}jmF1o@-=Z%)g0oXvBJVv z#8UNkh8qmSQyxc1-_w;P&xY*0_lWmh1dd;WW21VgDAae}>woQSJ0cWwiv0M##dlx+z zX?H>${Z8yGnsuDBH=wN;`2`$tjj96R9-rna8e_@F;RrfkWQiZDVR^p2*>Ug^AB=d5 zMbsO<{Q?_hO`7`Q;`rvV8|vouadFj$ia-Bkc@&Pupa`7s{4;Vc>CQjMftFIe^-jt! z?Y74zw&)dcPH_aH7}rzR^cSr%Qo(}gJIlb%gwI4a190(VwuO=RC?s*(V#VOW={5-) z!*rjzSrgP++xss}JeiBQs!P&BHYn`EyaB8>wL?{H+(7Y)z!&7o9LJIrV!_KifnDajAR)~0Rpd>EmkT>r)+c1J`p?R}!cOxd? 
zwbCsns+)uL3ME`UL*VDBQRnYfysb*?BPL-E;rr$%<0~|=5~yQV;&OlDF9_17pMK;7 z9T1|*SQBsI!c~bh=LmjnynTxO4(Y1y>CTxV~A(XO9dBTM|>a zOoHD9^P($Oe`$n$4R_9#*v(?+&RWF2@%)O}r~Yv>m_#y%dwZF>q*6RfB=%QO8Wm)b zCoN8n3O~*{?q!|lEk6>r06B685pp*lX*r>N2?_hru#^=eEG0qJ68VToMaSM?=^SFs zZuh&`2NHT^{tZ-FVV)nU+m`-eoaru3kmnyksM0kNPWm2%A8y5!x;YaU4Ds1W^h z@2rADir=0(ZfxFBT{eujhI4`*ABbR_Mu1uBzcz(iP_g`HOY4U2grvM|CkqYDJ&C&- zTjKWjIhWcS?wGE6&5qBgKEXmGy1c&|>Q}mTu#+M`&c;$wVUqkSo9C}tHL=Ji%M)=? zVD}(qJJT%@Va_zxc<1g3HHSGr#dvKj$El5RN8m%{u}iZmWg*QQ-rNE?Y=>{G`o?wA z>C--8U}!hR(`Ps2u5pxF@Xc;Jx3B2uEQO95A5ZqpLtbNYKT{cL?&Y>rVbEEz)J&&C z)Ncra&WdUpfRJrF?sj5T2a~U)T;eDy^m^tn%HQY1#fJUduX1P^HK331G9}-4!&A(b z;%iSKJd_^fdzE53Raz;R0*1B7|7K5zWCrVl!E#MC;HPO~iTtUivb@XwGIyh5GaS#BEX{Lz8xz*zSO1G*gwOHg2MXZYOih zUmZqRr3^+h@)^FJ8q8*+F-EmvqT#wW zHMD`13e2;>>`e^lCN;~{Qcll75b#J^3t;`<3Ek^HJwr(f=^I622+)}gC9auP1+bT# z^7{oyw&YNqod@oUe3HIh8EGcp@3-)Yx^ig4UVP0S>9BfUHX&*YA#GGvZ(( zBMLSO*1m3$v#0g7D|g7rM8|j`xgzxNH>kN7{aIN2XD}Np_{cG5T0W8&1A-{p)2Gu- zohHhWK4!FkHeU)aV-XnRK>;c{RW9?$48ce7?w}d~9Zo1e zK-ghlmHu3}Z~v0a)6Wt|R7oJfu1AVY&@rC5L5dbdjREr5C%WeIj=;_} z=eUlyp9`vHwSmM?jJB}--B$lDr5Fwu_&wm#0oc5K!uL2!sRXdCp6J6V|A|!njD+O1fQF!~YT)&~x)Rx8C9Vk;*b24-?sDk-wQCEjjcdJa z0!`NnXtl087Dl#!Uj1Km+6)MZXsU;Sz>^@tbBH!2j;EQK`+PQK$SDXQj)%$&i6OO6 z=Uuh(@|KS5C)zd?BVPkW_-gaS*PYTh>kCk^szjb*$T%AeD z&9-TBxrW}L_1D|@%8tL3E4~~S8OU$(G-b5U&4iUfJ-6)7L zc^tUvEOC_rSxs6Hsjyt5lI=xsrK9NcNKj|9VWuTE16SD@bvGp3#)+;FP5l%`x_QVJ zZ*sSD*8#?T69XN`=fBvSyjLpu)0}M~&Ynzxaw;Y$XQm7PTAH+L4yWZVjUpexDmhmi zL45T@wOg0^_blo}GN3lH0vAAU>6=8uM_?G99`ME^U7Td$UcjYR@%xr%wixcEVI5d z=$Ag4DyYJmCrNo<*T>=W2UiIOI>-0$s!2Cg-bB+XQttM7u#4RS325-kj%5 z9%A`GVHA$(aG}b1W+<--jWLrh^Cs&fX(IC1s$KT9C_U%K@{hddKnTtqR?w!_corr! 
zC6o)zgqe(1F7j}vpkUjNo!e@`aHG8Kotr2slZnXD7-+^F+>Qk7exvOO=8M(VHG8dS-Oen`!rMDYHSV!uZ}y%HXPZIyB6ZmJ^%^D7XF7 zg2@KdLTe&aWq`7TN1`+LtU>2!z4dKN6pN-6YSkq{}Wk0`mv{nlH7 z+G?J9&c{-QsNj=OK58EA$RP~YNffgyoES?50@@&!o~j@32Zy2@6_Vrih$pVL6WEte13b z(uti_4}FTayye9i?$M=apH2{4|7h)exrNJ%!Bs8X+YuZg>Tn5uTu|u77bO5Q^}hd_ z9c~X{oh3=vbicfwj8n8Y+}6yt4PZ$&+1 zQh6eO)0{68-dQdZ&;3znT{G1F$egF1lbI1_%px8#I(-f6!hClUi%wuS?bzTJJ9+YB z%0s?WKg4vj)Dx^%tz9x%q%Mh=el#`;$Oq-+e#VVIc- z83_MLFtM>R|8J1xzd$xdMmF~Ujk5k1%F4p<-x%kAK|DN!^vZ6w#)R~uHr9@W^h!C|S(_5l%jg^aEBb#BQ8Rl7$A4%Dnb`^HrS<Ya* zl?jHO;h*uaFtY!9GJL0**f|K989D#GG7z$|vi-k0ziYt!Q|ljwe-2I-CPFr5PC~YC zel}*NZ-kZoo9VkZY@GkJ$jrn-$inhn#>hy>%EUm(!Nx|&&iK!Sv$Ar){KN2X%bfp_ zXJh0bRWwl@&A*S zTI!oRe9!!ULjgXOvg+-f?Ucyn|(#%l6+SJncyJz$Q z4u;0xcEQN>ef#PEY4V>OEz7rp&@2jQ0qp`5@w@n(ud^hx8UnQ=p$; zrREAXIbiUxc&dP!v;b)FzTT}QzqGh8$Xrr$vx^HDdlPxP8s;WQ2IjgN24Z>n5{=e1 z|F6~v{yboN=UV%k<5ybpGdLz^@aj;kcK^XenoXd)AsrxeSs-R=L|9ZrW(?n;Q~Dqu zXfBR>po2Tbf+p1`^ksN%=A?Hjn96vn%_;D4&3gPM^39=Vd5FZ4*?7(VceR8o2hF;ov6185om zb{wajOUn(!F%2^Ef%H|5_1<`?-eHc*V0#F+T zE&))=h0p)YRAKPQ#xx)h!cVc-F7>$SFaW(nt5xIAGDt_Vd&3)n^_Hu)hLmPuQ7rtZ)1*Un&7883FD91XZA< zk(U*gonLgXa#%7m)mq^El{-6@rk3y1+R%&LntR*e6qn|f7RFSTHu|t2&3(G8W`{rZ z522Zq>g-t?!BTP7+kC$otE_Dx?3)_vASpRp)IZzafZ!%}!U5+K*S}b$sFiR$Vk2OE z_jP3;>)7AL4#U?oxKcG%wSd%DIDv+yriOH&q<0XE^-UpP7(gp}RC$02S$hWRZuXAB0Kot>-!Ke7fkeItoq^;Z*bPjKfrXS}(~>GHZ?9EU5rE_` z5cj}<1_6Y0K=ONtdk{dRKE!NfK=NmZ1`t4n0KyugG(M;a`6omW=EV0X^_5=pgWr}> z+B81ZwSF4o00BL37$2n6oIL`tqv8ihzgJ3tXg6B<$2VT~55D#f{)NL>5AZipCA?Xv zv48EHKq#ubh4iB|{}Rv(*Zh|H`$Pa%RC15wcP+4!Gs9dTz#bJPv-w+~n>4l8354Z~ z;3#diS2Kz>GinnM>R*Yy&gpL>VrVP#Sq#*JK|Zqr&NE#3+tQoz ziT}J!pBcV7t@J*1A9~|`FH{0xP*FLEivVtTAF3n32wMc*oIN@p(D3*47y>@zgCFNW zEiZsL^&?$Q;Ll%m-+mDv%Z0ZDPnkP4UyH6jPFS!UEJvzf<~s+Uyra<3Lpf7Ga`L%_Dfy%)a_XGDQmWm9X+EP` zpNIsUy-gK9uy+1Hu0!0h55nm*+VMW=Qr#%9q=Rcb1pVM~lg^UAaoe0KJ0SEN zpS-5A7_nK4+Zh3eDR3>} zU(~>Qcyv3E8$K5d0opGfZ!J9tA1T~=!`if20zOQ@-szy)zrGIqVJmtsW4gL`03#rf 
zsiEuDz$XT0J0D^g7#vYo-P87dUNdD3{C5|VY`p2N;8-_rmK89-FgF__*OPxcqRo1ZAzT1{=929 zGIT-qZ*h1vPc<3cBz|8q_o()(Zx!InwF}defD#$i!wnUFOhO3oXO+Xpp zyfhjDZEzX_Y?t_Qt6&x>io{TG$=|r?p`lmh1E04A#x~ef}G$SDq%^(KJ?vd z4WYPb7H3~rb{U+>_qN%wV4)Ri2aibUkPF?oi5P%=Cxt zceP=qP2}~RFi@Y`rPjTSkqid9YBb7e;E@;MhkqBd2gW!@GKO8%)Fg(;ti!zf9Pde* zNouONVGhh7!djN7gzbM(^slb1@2!uet)?UVX3XG@|*Ldc5WpiQ?cQsKxPl^8u+ds;EET`Iy4$qq-gW1yBo~_|Jt@LYt11#t0V*t9U zVRqjkpx-f(LqZpkrc%{sf#_FD0kk@vp1-#NXVJLAwYq`dX7!?9cF;))mIj3&N+tUEA^Ul^H3#)6sT zA2c&MX}vA2jMO(c-ox^dUijot@7oFUyrQI@WBV-FWkCRw=4KWg!ujaU;Qq6T=7?zx z)cLtfiLp%n=ACn~`pGJhz*Q=aTec)_qi&nDE>E9|aOMP+H|FBNvqgkp&H-NFoeSlZ zM8_Xto1w_f&)c%pR|0By2jKLTu9t7Bs@Dax%fFZLx#&UN(i7Jgqh!`vJ?@<=?`=2C zHl1d(>0vx5IQ8cdLB@d@?I%I+J3I)uw@0GgqS)efB!Xrn9J8xz=x~Pv#dIE@sP=#~ zA0z84GbnCK#rDX7)O#A^D?v2veNq)FPH@Y5?7UWles2v#m}PyD%|yDl!ZBKxMu`1#|+rH6FISNnnjd`40;z5!P?nA zA-n8}5wm19Qa8_*uif;Uy0>>=M$5jH8nXK1mNt>#Z7hVxjq-VhkGQ-7;|I zgO&lpASwT*xaJ&;&;vKkR`)aZ(w}%}i5J>(@Dem7or16znRzbzoSt>;%x@HC7NaEP zgBj`8ui^8fk(#TZ-4{h6K+AvZH>HvS!DJ1#`5JC3jF1~Z?%eV@V*|JLp7m42gmp3Nw78a=gWUG$!y)BtN!Xk1VdhFZ4F_68N1=KgwUW+9CE9hyr#|5^3>ih% z4Xjr~qzWn|{Onu5**Q@kk|^NNJ8J=B-h99lvp_X3dF~}|s6EyiS*zZ|mt#Ueh}Jxz z`dmoC$J5#>Dld9^-&e7@P*RWRu6Q-y_*t!gJIO6rH$q|6t~^pWnR@j&9b=CEM$2&0 zb9fX!9SN0oK#J~Vcg~d<8YdJ=gu&&dc8vKM$AB8Ty{-?!?LdcqW(;1itHVU1V6u(h zbDjn-DcQ_+txhRQS`dDxobL8K^ClFnS4(pw5v)N(1G>qgW@y@1S)d=YmxS0e(TqnS zco}jJ)2Y^8M&Jf36w(itF@V_F?S=aQL-=gvQOh$^Yg?$RpC7)*_VmL{CudKGD?@+0xaHH}3F78QI$PGH}#H)qQG z8Kpa}rmZBx#!_l)-3F%EYu9Q$FPA-VyeD(dz~Jnf#n)gF-mU1euSj*YFr!6FgI#e* zOT8cfbyEZ*;=RAmwA2IkEY8ez3(QsS+8DlED=#4uM;75II{Yw-npYC}#TF}Zyi}3v z>0q*On%tSIVB>Tw8(J z1YB#iqI(@LbBda;s)n*VNi^o9akVZHwtZ1MrDqF;8jeOu8iRVUIefURdN}Kz;^%Fw z&FxS5aGCa5q+rcq&ad>=gdL{-ONy%T=<-TWl7F|iJYb3%$Yb2$&GeUB)f7d8HyScg z+@|cFByd1G?@EwI(Y-&x@}x?Z%R*(AL$(qLHs#)vR4FL!d8_&=|H7#paPQb08q}?y zc=oNPaDTdc=n`06xV<-I*3&}-sNk8xZ-<1sed-Gv2(1^hXaiL$Lz#fs=%r-gMbYy< z?^Fs2-*72%mBD`T7kP-VEu=&b@G%RR%Jl~3x*xhdos6MJ^lga;yrVqy!mZ2T)z)X+=}vT=hSSav#u53F3zD@5q`9yQj)Z8I8)6ilYSiXU%p&@-XZ 
zu{vQlCD*B(Mg05I5_s!yq#A;nw_hCKQKG%tcqrc`;US&49r9%bcGt~7F9upJ&&HeM zOuebmaYL{$e|r%BFel<>dh=(Vw{b{kiyVh_nQT5zG=EI6hEoo`dt~W!KOni|i)qN9 z>#gN8;wmzUbsGp04tlThVobt;vUB4#|Yuv2gqx^+4 z0hZVE2TE_>GNM~=qQjuW`NSNvu3zxb8t)o1R~1_SkJj|rAf)5RMO!gh$k!JA48|}c zwH9PH>gdaw*5sq*cc3;dX(8j}$ZB^u$$?p(Rz$<-Oh*ek6ACGolM#7sg*5zgwancB z+BR2Ov2EUhFvVZoB1J-B#Vl^Jueik{&IM+Q5WKPuxe^kaB`^cM7r(``> zdqzxL+&8Xsj0?1f^@6ZM@v3stGqL$?_fxiF1$k1E$@BMp29wum8?Ito@o=s+8h$~G zl*<89?0Veg zqVcU8RT8YuhCIN)Si^PMHvaOeHXm(b9K3%!EFF=9_snKgG+{v6GCvXEDcbi!w|`O} zelF>O{#91rz6k&pzM>P@HBLm!CdcUo0n-Vq9w)DV6;oO?jM+RAAb!_D)n5YTtdRTm z?}ecKe!QVu?{=s~Vab$|sLFHy_no;CC^j$N$(&t_SQ>z?z z>}bXkcytp895e%rFz)TV(wi;l1btj$KC7DMzI1i%>!7@tIspWOMGl1M7)E zp+yN^U19D>TqD~y6#1QFwi@!cA2o4NU7J7CXAE_yBW5sfl1m0QhE2E+^Fz!gR7_u2 z{@5T}N-$Bz80B?G8xHe$p1&hl`^)s_U43@`&S>-egy1T$z1O)aS$rJ0#P|>-Za_@^ zeIcb=EEs%nwSd)WfoU8V5af@RskdM+xv3JkMTCe>$~lq|LH6Qa`P7ZnQp{LRYFW`? zpd~F)Is3D;(@+6-7l6FDgCqI(X7{p5GqbJly#>5hwkVRm2Ii9W72%r7@bXuyTc3jW z0`!|DyK@hb&CHggwXPT55~^c4dAP*~<#4am4(>Upr6_yJRvxCkQfENF3_9pQ{qNg% zO$<6ssaf`+*(wpK(wV=cjbJt8`k0ifJxFuKsEL9Yh5_DoxNXpzoo7m8fMoyc#^)y% z<{t*!uXL9@CR-LkcOIlR;j_!|{o3Kb`39C=vtq+G)DLc3c(;??K!k+uQ-EY+(B2&- zo^TEo`w;LWNvR@TmLsqpl{)u+DMZJ%*6co>eE&$4qc1wHus@5>{NcTJJxViImA01$ zI)O`MX#Px`$o!#nE!T;4D5io9mwbXrx0F&SFVTINJWbRLlf8>m0=JADGMNnLm@3)0 zGH)E5G3pj1k?+H=-!SSJP|1{XGbk@m$pSwTM^-_;nXSk6f(-t$yE;=$Dx^F`OQs%U zo%v7!pJF_!BkB5!F`t5s;%namMwm^%4AD(hR>n)o$kf(YECx0qXE->wH9no5%7xdI zt=MwL1043}aTr2O+@u_YZ0tzHb-3ONzlEYd?%~o3)>vV-Mc5{-s=6*!5qukus=M7a z1MIfb{>p1hTj*OKTYa#Xcgzy^IR>ftdx!a%c^t7pS9Q0_3$kg#(`K>?zzDuaQ}(gO zv-?Fq+-*F3h4r|FOjC_rKk6%t`F{aCK*PTe3Q~vfQ`|}{$>Hkwu_+Py_*1V_{QL|R zDu)E~;o*id&ilgzB=<;v+Kdk}YcVz1RKqs7H0?ib=qj{8qRnIB=c7hS%cZri`bwD~ zx`bK~m5Rf(V?*Jma9@${$e2qoZ@ziE2yHk(dCCK)Qk*_Zr61&MkV>d@89ZdX>eXWw zENd^oCt$^4Q#EM@JleDry0^#f+T@!B=jL7k|}-BzkV*@cu! 
zUADJlb}G(d5HSu)M2;&u{xLOE*nbcSZ6jrIk0jKy`{HWE{ec+%J{k&3PEdz`MKx$* z)bk3BtSC3M#HDckI%)Rx!_4_EPw%NTOzL{2k({8A*r)+VKE6Kno{S(Z#y3LQ?taQw6u%a5Uvpk|~iuwI4 zwcj{zHhrA5h-W~RXOrx3xEspEAH4-|F7nmzJQ2J){`C zV)JL~PC=%CL2Dzip{>D!tvG-hVT>t!X|9<2A_)c8x!TCTC2;(y2dWQ3+=_9hfl@SY zT>vD#y{muD@x}abmcvzTxJcoN>j&vO(a78N^6*Xj&$lniMuOp7oQJPDJs%@`3}@p8oU($8DXPDxJBDm^ z4q&v8xbu)vBod#IB*JPEt6?9$2 z#d%{s5hb&?;E|Raahy6{bVnYAt8UPA{MJF{J8%6p5KAv;48ck(PG+Wu5GiRo!D;qoV~>^8q=z9` zomnbPgoJxduX+LsNi(MH3!jhV#QwzmaisuxsXhcHHyNAA&oH3**7hrzGw<|4+Pn=0 zxcWi$GN20`dMj#EV>Ko3JH7og)A6OBZpnm-+r9;D>>AAyNrSeoY=8J5*=tOdvi za=%NI(M?e4nrx&CrMV5L`Nm4`O;B3>YLGePYm~%zKuLmhy^TVCDs0R+O~)qCb%HN3 zR_mvy7~;B|)yI;z{0Esx6|HV?P9kJSm9XdgblNZK^)ScCqtYQ%wugxZwoQ+RRELdy zs&}F_$Y0Q>wR(r=X<@08*K%cf%0uMvY&}qi<8zsI7uwo=8ku%qRV*8x%l8-xy@Nnnw>)awAyPy^FsyEe_6JU;_N)f_a6g0eNyXLspU z`}V3U)hLOVP$y_B2;U3HnMDt{)Z49kb4>`RsP=C?Tx$y_EiS!9jVRMqe&W5{!A8D3 zFSv;Bh0~>8;kw3u(6jf=74OMrr(VwXIPjc^hBM)AdF~e)OS{>&{CLuPSz>o}u)-YQ z5;=)Ni5dJ&hL%zc24@ouS{Q0LsT;FRxSlj^e7529!&<~;V_NYoh}9<@SKJ@XEcEMz zZh7iG;wFm4SF>mxO$z+aVw>Z$*|H7OtRAnK*H$Q2QO_qW={}CuS`M|MJUL10oB1`o z*_a((dGkpjsr)(RBktCk%PXgylk$u;omu=iU3RnAxbDtuisOQfS4BK_v}8*~UEN|6 zY6LX+HeHl8E~k85Y$VpDL{e=pBL4Vf7qAV1d`%Rj^DVL8#3iObhNpYqDw}X{E$Q2T zvE1w1musnfhAu7$Sf~O*I5__p4d$Pm&_M{7P`tdIwNG8^!&0%Pa8i3lbseU`d;KEX zBFY*o1~j|0Y!PfDc)*22ulSmFGNnUCz-y=jfntoZaopgxcbfD*1U(s5-&WYko(tWh zx{UH@Ne1?`>nG_I@7o*Ntj{LUd^-&+q zKVsK78Z-@JEMJtBw6)5J7IdBPSKcS+KURo-B3=x-(@d(o%H5dl(p+2FS@kSpHXgyo zxfBXjj>3cg`H2r9u6bH4hQj*E^dr7F{a@H#{ zlUn}?=y+U=WEPTp{loXA7Z2QTB%~wg-Xm*A>#1lv>C*7<`-e)J-N{BSrM2q$cz5#a z9%TYum6hYANbp`=Q=1jUCL2i*QHNbqF(p#G&u%Ad-BX->C4|VX>(zzIaCJZ5ao{%> zfv`T5fMhC>;uAVtwnp)$^O0voD->CQ);A{+f>WL~OC?l@=9pMXLIu)YUEZDQi&7&l zyX1QHElSO5PKbKAM@h+c0+c$$QC4DJtgb9;qv0^L9c0{)ezXl8?fKi0mR`k^f!&LR zlw=KcEs3R2r;$|dNdAHa)ssPMSqh!gpR|Dc)F91a|-gE?ul4=x;kh<|t%u z{nN8pkf2e{6%5U8K$q-A^}(kD2`g~Ej#BWZugrJ@%PR>Qi@m!lD82{H&}qIn0yBe^ z(a2SL;$t5{@^`9?GT?*CSpl%)z=q&}P#E`i1a&807$B@gq_}nXXVmamV1E*k%bR3w 
z&nISBbma>EU1aVH#7`f(P_(>|8q+nj(U}@--!HE9P&o}uZpmQcsVqsTkky^$QWmXu zjcKq?0d$-EK1Wd7&)f$#Lk}YmC>fAQti3KOAj-Ap(qa{?h}OTE#VeS(jawDKdr!Y; zWj*_TEGT243}d&lo$N4!=B@%S+juj4+D91eYqV9E1c;@|X=T}hV7Su?MC*+#HD)sY zHRalw`U*)&fC%BezjiY*Vjt^N(3maw zaUGc)&0}cUu(~hl<^Bfq?VG_?H~eAEO3%-haoaJ>b7}36p{asJGO-|&R1{y4ZU0xP zAoZ0YqglFYfm7cpdnoK=Ci}M=45{Hymlx=?k#;=uf)}n43sB-(JIBr!_(EcWnm+Uy zMc4Z9L#>&CqygSt>r<$M)nB&)aM-#^8yO$axjJ`@-nvk`p>KS$r{70?!$FOngnQpw zr}f-mOnQ*jSwm4#;hi`xr}gB}YQ^=17w^)jf%StS6@dtLQp8dM5BjbmZ(*}qxs9TF zWWC_Np&3uZg&Tjbs7LUqy`}PnJTUUAaw$(zUp2o8I7JM&q>MjLjCk$Alm-1675$Se zyb$xVT9c%GyV1L<@I)8h>u`r)2VZf@_rq-Q?(Ia?Or0^KJQs3MBzYSU=N1WN=lI%O zLBX&sI%_TpU2n)jI{d_$DR`|Xuv!x^KZH~#B~Z!h){Lf@R)ZEEVviHaIgMsj>a`ao z@R}xCFA00Tht>$Yd>eVy&@vfv1TAZy#e4LCuGeYn0@TyZHn_Wzlts+W;BCEFP0KG7 z$+%&P2(+8`R>--o5GAE!^?OA1+YB@PG$6bTk_{oaKhMI8&%3%R@#7+**ThjU#j#gN zzn5tBF1Oqas8yohE%|Iwr;rPw(SDUZLspcLE~kyubcRV!wStkaHv;{memLmp1_5p3 zo>sHY>nYs4p;=C#`lHy)*-zu?F+*R-tR9k|^tc~Kd8tTrH$+r=9pZz@WF#7hqZ;K& zl1X-nihhRL!jM{|F4|rT>*+e5RcU(OhNJ%Y=ohWS!T|XsqP1Y`Ygf|!X;bF(zJ8pB zdV&v~?8gIop2RUz}l8?2u)Zzx{ft@Q&$`%`K{y&MOJxmJ^FmJznBr6V4a*|MF(tHS{jb!mh z8C1X<19_D!znKC)H-Wa;q)`*6QP=eme!jp)3_c9Pmu&Q_Qk_ib+)UZ@3@MIBFg3~c z6IrStQb6vYE#nLp$7gFTyz9J26QS)mzDZjlK=+tUZ%98=`H`H*6!~611w1Fm*^#@p z;SA&pG5F{U+k9G|`~}vO1yDjX?6MOj1zkx^N~42THi7QJW-*}EvO6Efg)I%8WhJ!d zd>;R0h>Z>k0mZo1_~J;W{f-Q;C*>h_L$hvq(qVObvRQhsm)oro6q1Y^9Um`dyN@yj zRbre3r7a)DkxJ}WAuZ}5l`kV?F^oU6*uQcLx1!oLZB_?wzMj^j zBqz$ z=VYcLY&_(V9NM(jtC;-^Q-v(p=+@G1{Kp^Yl$r={(cdge-#FSR-K}cv~uT`(g=W=bBOfMPiUH zs_p}`w-%=43qUmf;?U%yL6^6&aw+J!2YcioI(&&s^nG^o8UuWWp~>*?{lPSk#4D+(!5NoOhVF8$V!lio1dMq>$jIJ zL$|a*UVI4~k8hR0wQ&j*=V>gCWt0X~-^1|{D~9PkVosim5Q)L4w1DkA9^=C( zM-*&D%_Q__-3l^$f}6T5H+|QlNj1Ddv=hsESHX<~WF3aEt4HJj3YsJU?_!JK*Py1@ zLI-LzR2HVN59o>u+=+G5@3gkorrX30)_?&(w(lERy89FQOLDG*jt_F=8&EG?QvUAlgM%ourXMgC2zeyG3^F2hwk?GFBv>P)8FfH)B6#CFU0@ z9z9DJ@o=FptX%u$Ml-PUN||^j_A$L*H%rM%;9sqlKQ!FLIyf$q| 
zHcls|Lae@fw*^o->|xYf4~>?7IEAq}s_sjbISk?&;XYg@>R2|PVG8eH0Sy#DyAlj-L7rVp5JV3X)shu0lsM3_v8{+92@w>PzIMb;{~-851|JT{?4=47BeO3MVz9Ry|Cq5l5dpc#7x@f z8UP>WpL44vaL$BOm=;LAzDO{yC<;&6vHIwL_t)x0tPX1oykt_aYPmkm58R|Rs32W? z3OG+Hm@#ngz6#QKt;H80HEKgp4A43=T!; z5R@GPujZ#PxtQt#-Wc`Fg+W`ELCZEFG00PNE`;)p>2Qma|h5Wvn<+L*O|j(sM4PiXOaa8UpL-| za^y8NKVxN3PRYc2$vY{8{eFLd+4?V2Z!ec zVBl$?siQr4Eje}4OqcNR>e3Cj^juiJfXGyBQUmLQ)#m|AJUd@Q_t~p(dY6=N_d1O4 zz1v)gihB-? z&tjIsa)eV3`A1G?^L^hmCr@2ey6&TUj3@Mj2VK9LZTk~etvo{0k5u!&Hrt4$xa}A_ z2=OZB7NU??0mMD>Z7*Z$+6XNZ-W=f}V_IQdA0Tz2_K$?BA_r)FdpiQZFF+m#JeIOZ zcVyLpo0gp1JCrljC9izfuS*mx4GCFGE6A735$=risWLpB#T%W38L0M=_yop)(yz(y z=Zqbh-N1+mDl2BsmK3F@k_ft>X6UOnOaY?_o=ay!-ySdKOuvMm+Fx-#^CZ()&5Uhd z)88v#BtKekDy?YWmWI?qKvb{6zNkI?#~GgNB(KJe5E%S9dk{nRiDj0ge$EYLPEjP-&|*lcvbm#6~?c zB40&G&x8|)n~hE;YGGUbb*MJli1`tZoPub*L&TA~E%d~{_zZ}@t83JG=`h(k9^k`a ze6EkkW6h{#_oC_=o_{OBOBk!y=z-L+vDkeAlnkk3s>L+>YVx4+^PtTK@1!+}MFpl% z){W;eMxCzf;!eC^R$3F&Qy{yBKmVHIC_4iJ@uyi^^qxkrysUazU7o@nJ(}mpuRmG` zvJH~loIA>w|A0vBW6O^Qm~PHDID#~rnUiHY?a=rGMmZWH{0mk#w-CBtTR2ND67Kf& z5Y2I2{Os5E-Q5!79UfZb_Fd!Z?(eJQ``QNrVHe@z%X`89bmPx(gZzetBTz@)%5p_B zBCH}o9SyQ$9V}3-3?PW*Ic_cg`DDt4WQ#c;X%w>Qv~2bZrgx_iIfF_*CQ2`Rls`?%ow) zsWARLL+Ps8nInz{sWHzoYln$9cP5EcMvEy5cBO2d$uWy=%F9i9 z>!MXBkNDW-R##mxZa3-W{SD4I$9UF5P2|y)TsjL45haIvx%Z5=J=5($2B;Ha*OMNu zG=iAlqt|nuJ;nsQpXo+A9`QHDEOce$(36~FxFwp}wGp!|zoQJ@*tCdeI|$d+pXr)v z65|}e1>hza*};@qO#vNzab&aiw$lFX2cW%+qhr}0+y>42p%-nb^av3s7V^xE|LIN3Re zgYj|z_0X{G(O7vAe;eSYq*?qtvX`%oU|-5?YDnZIaevC@D_Cth|Hp6(1b=^fD!wp=8k)9s=LiyXUNVeqo`KP?JrVpo_5WPo8kqqr>EuU*; z5u#entK{GnEr(Ib*I9JN=&}W+m4`>jc(w5YfC{!q6WB6kN)yR*>SKe~bCC4-WV<2E z{-LoV=>fszUZU~T9r9EtnxC}4TcsssP2Ghbk=mqvPY5d6+!z<3;_mK$C$kpbrFP29 ze1FudYjd{yvA7g13#u{!H7P96nnP6?R{X^rJIc6e-XwB@!LvvVO4IxrWiu{R(l#^kd((kl>^H}KcZUdSQ>dc4vBn!6 zo!Yb*?4D8ZRki*VZ8W@;{k?`h&Ozc!nURNWn3IPl!sTv!EF_B7?|74B9T3I1MEoB8 z`}5vIt6LunAz9Wk7k-;Qck%76>*@PU?yrIy;qCBKL-qi#LjitY<}h^8jJcZR=Jyxy-x^OM~xV|^q!pu 
z7P!r&vjxBC$>1s39g6!AcI#URnCHyCrDRbUDAmc}HbcH2IWrN78+5n1C%F!jYNYsVZ%B4gIA@L7vxp7AX6Thd)cLl7#f zQ1H^1xlsl$o8n>o(=+RhD?8jyVdb+tn&qmV z`3gPWDs_=dqD((2eZ0?iMDk>lF5Ha9)CuU{F864&f zgzaz_T<8R-S5ojWTmCQ)oP}ZYq5|4v@S#)^4z&nd0Q5zIDq;+**RU zk3eqR5;&QjAN2g7c}P@x-o4)1CPqjM_&Q%2=pQc}CGVf*-@a-`$e*%@ajguBQ+yL{ zIOcg}V8;KGuGkSZQNl%BX(i5`TlBMye`-U8#0o^r^P>{B|GKcDm@+K7cR<2fvrwGZ z;001zoX)a+TkC1GbTjZabKn?}WBb}m@oqMrX7hrke}R+nyYx>eiyk42T$aKihIS69 zN%_M(yLo-9*Cro4f%mRek3n!^0sDHk#&DtlF`h8Jwczx7`Fd92^di$NO4+_Ox-y^8 zbhX~3O`WG9U^?wIiZP26ZU;@H<~$MICnb_-p2|W=%sz_8_4ErW29gsBt55uqRzfS6 zgEDdE_@J27*Cho=Id4KuW#kG&%PLNG9prbI^#_FKVTdG;575sCKudmKc68jwtW&3d zG}zl+YM^7)JtWFrDT_mX*gXFf)a)a;Oy4WXU?=3Ex5y~;mD4IjiL zP-pvArtWc1ma%X1$L#TVWVFu6ZXeu)P;mPPCL6Jmju}H!KVw@*Xee3uJ#x2?5`L*) z&I=SUQd#q=O0iGO&6QWgH>H^l8@CH&&=l(^~{W2lXEi%H>n8z;GUr`gix;>6M6hjuX3I ztLdR#7>mQd`rfl5(rp#RW6ON;Q40MSI?yOXTo0b0aHQ(}nVX7>5lvl?2!#H`DpE@( zzLM}NWMUK)bRf#7i5dHP-`)Hp*N>hxHz{ zU0PP2Wu6QB$z~K|;@w3baeeZ*D(Y+=5Z|c(*k#Ai7gJWPnzfW8uR`Un9_XtEg|B$u~FS}*S4>Ins*TjRJ_ z@8lyg8m63d>nK$!RsNw|S5<3ZOp$6Q^ zfBi&i`j{H%>|nZ9f0Cbb;JPvIf~xux69U98gy!$iqvgy_S>cN_l*p1ETwt2wW+6)}*=aN05M|v|6lgr&oszQwIuAX@D0mqO0JIlagmU9439(1jd#^~wy_gzXN z6=N6GRY3NG8WnC&-}1=^zmGRK&BPU@IGC(-CXBTe%Zw5Yb;9a**s2*1*kua^b#Nmm z9j?3xHmxl3C)MF291?M>F%xe$p90C`=+!#%(b=gn9~{aP*_@Z_)x4Fy^ug!F$@J9& zgI2(|G%@X+r#)uz0B54sP2cc&kG{T7k(fe@h!#Vwzb&46JiYTR2C($pIp`HX)A|#= z<%og^8=|owBt7zMym0Jyn%C=L`g;~#w4A=JUd*ko=(7(6 zAz2VA@OVD5ZLorbL2C)SJ8N~WV+`R1X!v+y*89KyQbWzP z5$r#1N2Tu4j$0dDJ&TSgO%F4M*JW3UMABCaQEs`D<}2rdLd%&Rn*%+}q6TXW(x9$o|I3h7;hT9L~GH^*%Q^ET~uq>so3 zB{4-?Uncn;Z7Mh z=41?;$s{f=*2YQ_Vd%(!^ATLC%RBV2%w77CUmw7}TI(1I7z=sRf{HOOp!y6)(MKk7!lu4j0A_6n8k)%Tm+@^n{#|3Je#bP94ulR8mCRpCC!ev^g+gtC%F zzqrp1wA?Ul~YIyWhvFl}c zTf9IZ1phJAV4U8G1-e-N^V}00<@-TAo;%_Dok^coEH)yMeGYr5AjjZNhs|mc>5rkv zFKg3uav~Cuc%Qp0P&A`isX^%V18*Y8m4G&c#{pw7<_zg@qf#gDVs}YJYFYy0QTTuw z0}IX8MBhP#YQ-eN;Qwd`riW6Ko+S<&s;$&7lTWdw9q+TkVsi?-h_um&hJ5dt9!Thf z-=ZOx#qBk9#|8mHC5H1V0+uAs#LxM>@L{mklIkh69LJ1@CcI16HT>Qxe0?o0BxiUM 
z?x743mY7Gc{Ya=mOnWnhx~UjICH49IS{f%q4Uy^lyHHqVK3~Fy$CkvAOZ~(dIZ>&| z-11MDAL{P%&ghg`S=&@k@w`$AqOn3L&1Npy^p-60qO;>yA}N=IWL*?gfpud225u2ny*nkO2NqB*(-#Z4Hd_yFl=vTki8Pfy2m$af@;3pR8O ze2830B8bDKdLs5jKI$!PprgTHCmd29_L}{4Vj@)jsTP7vW)?$)v_C6TEQqOxeHlbs zdzbnlYe5o*ITv~;dW7JlaJ{oSC{@l_sk}p%M>ewgJBBUO5J5L5QZl`aFklb==3tAg_qj`?1NMhi_b^EWtAE76sYl-Uod~~IZ%BDHWCjAGc&5FG&EfJ$uQ)5i9a1PnH8@Ms z{0vPGuhwj#A9Ay(Jd?zRa4N)@^9UjS${h~N9lm_#g`vWQc2SN2D=e9iZU?~ip|$2WY3U4L>e?l74x|%sAb$+|i7A43L#amMq@51f zhjnFDg-bVE$XCdVrMQX~>}+{z`?#X-HnZMulU|%MfPz@VPEprTSa_PwWE>I#?L73v zR$iDI>=`}3ZUV|2ZM%V+y;(Oe5A#XkYF1Qwyfc4CCB6uNX-*7_UU3FZX#Ez<*7`!U z*%@Lk6r3QSA9yViDR$0X$sePHbF67)et-*aI+65lhSbO&5BwGU6%SKl|oqUO^zx}`T=%*uQQ9(r8D`IrTRY3Pk($LhS)roBugq(95u&mNHrOK58Br9%f z&V}g95uRiS=*TOACap2&sDbyNVifJD?%#Ax+3DF9U#r$ub}o5aB6Obsl*D)kN63t9 z?;ZP&`rd^-wwp-scvW?Q{TxiHQ*8Dy^oD8+qOjRq;I(@&iJ{jrzCa$De~U)o19%eR zfg*HUb_edzomu%>Mr&FD)BG0{_92B;4(?oN#d~G{WnI;qX6?~jYun1BNpH`jwECV4 zDzE;3Na*}gY6=vzEavrTCc2=^_KJQ(PQ=DDd(kG5dXZ-}=Lyc;@+2AG+(t8&)WaIG z#5F!)nes96h>jZa#L|sopqPuj(J6d6E&@v6`vkIE0p79AcTEBXvM%m+o2g%K+ zY4@4U=#08Z06xDXUQEpf9h}aw8ca;9k<;~CP?&N9t13pjy~XVG zXGYzx1G2Chf*w~4moM{k>0}8P5G(W_jX@$E7&+2yy2(Fi%!*V0K+*hv^PZSELvBcx z0+*@(1nf)tNCV|O@(mS6o8~EvE7n;F6Y9FuXlM=kCR1IM;^1S|gDdZY069l6(l93B z?DZ&W%{rp}C;*hiGC2cTHn)nrvLw<;P|h3oC(o{xrchAfDfY&BupVvTBzl4D|iV6LYjDQe*u9xRs04+@crT0Jrqmf;-P?ejLgCi$^h%z=c`w_ps_bb8-c*EifwOT7AI?1O z^eyhyNDqAq7>18A2n0YZ6hMQ9{;>q}q`Cm!$epQZ$3LnJ5YP{-n8jvpLiC>i*^OP^ zm*)0~4~Ia<4?mV@hT)#oq50D6SRBlD$%km}eY-k9&3qBWNIU11fbkX*TxpTtjiAC; zCIL=^IB!|p$hfBncu9e+)#Q&?tf-BNr@!nS8fH8ZX}%M+5WH{PEDMmi`$+?)EFcYT zZBK7m68ee%+^{e}oENo=3B`vs;jNAgYb2Qi=*{XFOossfNwg?=0r|2%Y_s?h_2Y%C zW*`vypNX>zx^OCGvRd!t3>&{pnpf5p!9V$%AL~nkdE=0sd~B{op|nm%GHP`+S9HLv8`u@)8pf)OICa8ynvvPqW7xh9<`9Es(zCBf zaoeb#2*SRNxR7u0`j}*-&MDf;!kBXF3oL7p8K3lrALQ4I4Azf1kdHlGt7?WXXN!!$ z@PT%Su#lULflayGOm4L&ENthzY{qdsc5br4_V&UGXdCM+fEInl^Uas|6(a#vPn_T` zeh6Ro9g)h~?C}NDH=F#IX`P~DoNM0To&MEOQZv*Z7Of9GUG{hPug7&* zLY@R#yt8SmUx^If@hN-HUYH0Yo??NT<+#X4;&4p)F66wn?U|IY@H-$aruwg 
zpQd)bY)F}G0(pjo^N5Yqp|v{2ux&g49gGW`3-L}A)t_J_+(2s76W1P31FrB2s1CO? zBnc^$Q!s5Bqp*}P&drzZy5d)2?-u;pY8LMkP<)H z58wRvmHFCq&?M%|D4gin0n7XBi^h)3yFR+vm}Ido#7C>qjvH6Ce~~noJYu?bt<}pv z1@ai7+Izs}&0cr~z7A?{8aYD^Q3wy~cRAVgGQ-^?c^a{K8YkfP#obB-KBNyk+1VZQ5brakZ1F338(APMVE2$hNVt)Ub@y->whk* z-2NdK)`Z$vQ0^pS4=dPiV&!ij^E+Pu6X=ypj$(<* z4jh3nkdakN_e{)Ri$>II%`#3xa$x8S%E~&96PacNBLs}PPGdFAVsz$vUrmeku&9`& zkxt2O*>3jC^`G*qiLvyOumwve20QH-v|nThbRrE@VbFWY+=b_D9mn!vNQkSB4yDxsvu-)zBgTJ98|#e)c!>fGCVUq|C4!Ri2Z(^bD&0KG1@5Eg9EAw=hB!Jc|BbOL$>83gbKNE zfLKIfy0qcw-!2(6b{{bl#|k0}u#3pn`GVnKote25acf}Ua>kHtN(cB~hKzPv*r7L( zf*$GZF0+UuU=5!cYTynkM&3VSU4T$J`?U)ehP&J6Nfd!m#tY8HeJmUG;bmjj0KN3_ z2CyHp4-J{PDoJ({y5e@kOh%1;JCSjIr-g5-ZP15edxPZfkUx+WsSC@gk8)314y7+wsAGmMne-I?6HsWyCELTD^) zG2Qk>kS+ zn(<@prh4t%jd^5Df1`jJM6SXGPKF@IQ!DUgaKv54nf>irWNIAl;Bsl?o19(okD44? zC7to;K@Ao^;n}he!9_-q?`54#@Rj2=TZ_Zww!VcOw^_osR4|H@&*#<=$$T5TwZ7msy2Po}T?VEys} zc+ssN@xqzWntnYAB9O7oezDUuYsfz!J9zR&iL1vUtjiqC&mL9FOytucUzK()nAAX?f@W!ECkecSt>)BDQhC-g1sVOG_8%^iC2CY43LGGm4Z`uc%i}9sW1> z-*b<9SWi@i{BCsTbV@ynr!#p{gBAv$^47r#D82 zC84)hYf5HLp|=w36pmTuZ~kWWG#NcC)vtxI=XyMtw`liHMjJ}pJ7o}iLuJ%Y>sEvp zCQJK`>L|4ofW7kORG*Q6)4)#hef($JZEc`d@WxYZh*TJ)T!KV~pu;14iB_6KPZC5JqGD~0e#OE+!p`4u zoF#YM^3mdS)q$+tzekTHK$89|Z2PaLVrZP6e?O%1A)9EDD54<>~1u(%9bR-$h^ zB0^@&fagi{}Z7o#W!)tb1lh1L?(+KzxX7plN3?M01FrSu1>WaW5k7)6diE zT}{*B3gP;JB#|%~4V^quHxq#zvJ#Y#Lo*zMsqPE1_%13cRJaBNFcWHC_X}#l+)c}<5by-p8D8;qJP zwvQFX=I-|1h8m%4%xh+eq+Kg`K|4h2@94nlYU)azKqF6M>QSrb4$TZbpgzKSauR@li$;w)L3D z45=tymdg*piJ7z#^P@=)BCK{)QM)iaE1-bj?jX%82duuBwJnI%&1LKGnRt!uoI4WP%MgWJajA`jd!W z0pRMjB)odFDII@4C~lnW#7({>D|$kaJdX>gg=DaYR(Fxwu53c)jD}^7JT?8)$2Mw5 z%3>QXuw6U;gI{qWJO&Cp)1%d;&3bmCwhI`GfiGFEYzkUWzenKicn#k22>5$i%t|@= z4`|LS?bv%V=z&Sm*F~5hYNr5zOQ0C5x_OVlj1NYke9}3z6~AA=?8qM79u1%3l7!fP zstjCg#489lEER``on~Jn%%SXtt?wBX=SsDS6lJgGJEP>Mfq9!OkwHPdGcH>Ut?5f8 zQSp6#R|jl}i)~kG3lK`crEIWrcw4If7AP*8Trswbn~0CK#8&6USum3Y6V%l82qKD@ zZtig!T?)xIZ+%js2agIX$!4$Y6MD~ZT&z6Tdh4_})Wy;uaV#vCh6Zvm 
zUg&@G0!^WBPc%TqyE0AsmIZ!;F9HkQ-)UqE0#Gj|0I`c=k~n1`zvuznKcL9z>H2v9 z+OafXSW&S9wm?d_zqdEKlC0YPVn<4@b4jw=_fA5B)m@Kd#sf@dMQExGeFG0gYxa8Kv5;C zNt7&je#io^7w0fuX9*NTxk{!HD+_y#i#=OEcS(SYztD;(G@L3V1m*HGebt7b0m>Hsj|@{{Cn(VQpA zxw(9^$U?3)(k=W-_-+@(Q=`YRH7}vI=Np6B(pK2d0x>3Ui|ccLs0UCJO9|$%nCISw zOd2jn{VzkfZyfU|sPQeH(Z9W6L^Y$HwB8PZ6E*&L+Jy^XQ~c%a6Ajf*^1zQi#@>hN z(&9*woZ|FVtOMT1!)Dv92>scNXNxzGiNZnOK-yoXx4) z+sM~qOn<2xl^`?KV|C#%WH!a!S8roeamAPTp{DnuJT_rR!B6Ru6 zw6HFt=_gVJDtj%XrP%Q765DMy1^_h%T0KyULz(ojA6LX4bdRsiZO;%F$i(UKcv!Roy10Y<&~K?`N@(M0BGFQK%xoog`23dQ*rgqY%Sf;UB^V#5uA4iqohq zT=Bl>{?u{UsI2iF$#P8}BKze{^PN1cwB}G3dpJZ3S8`?jm){<`=k(Wcm)>St==cE? z)sJGhv|ncdvuS%^LGZ)rJ@rmCG$q$;ovoYFUc)PL$)AsbVy5)mW>7B6VV)RA`$S9f zx+=O8Pz!hGm^-vr0x~~vlOKiXz|IrI9{s?hx}8+X;tHTE4DMV2|Hi^Uk(MU!k>zqR z_u@2rLZ{%P*TBWj1Nu8w52W1r6mxwXZl;WNT&}J+-Vvub{}M}i#R{M}jhXxU&+NrS zn3xbqeJvu|EEANZNsa3jtT868ps}7&6{5s)ITU`CiII8rVH|8J1fbUt0H6e=h%|u5ajX%g zjbv@<5}10$LLknw>f70M#o%RLy*xHMV!^k2^|~1;w4 zdDfdHp)_8|_mTIm@V=nqsqH!niH?o&#h({UvP5WC&J3Y0C6?C&t(YL7aYmUdo(4sXo&fj1~L8W45S@{ z1b=kh++FEimwg8Wy3^EzkK_2uUh6Jm(AV_`7}#Mk-ie~9)~TJ9AK~kO`}c5b*~=`J zH?f}$r+u~&!bCi|hwF~zD+9j8BZpQ?3V}l?P_xysbGHzeK(#;|rKGe~`E8$NvU5h# zpenjUzgt4QZZx%gizcFdNm_DIR)?fspu1uw%~?4-SJ9I&+c(jb1HK6_Z8W@?woCo&Uw z8i7PoVj%T=hr9U0xO4X+BAUik%Ra_&x30aVtYS4f_GkVzFAKA6;7qCAoH=SsB*i2a zdxz3Y-g&E_eX~bS`NJ!|cI|?8Xf&2%nMM{1JY0i>0=uGAEf;$$_x$Y zg7mjgWU0muF?bCKKry0|V;VlfkRL5=>alsL?!#7y90DZREW^VINb-!nJ$?efqeuKU z?GivsKSknl@0`CZ*Hba#ZS#}JF8bJ5%G|gJcTBEo=J>2Yw98=FxgQaTkqB1?Po>u% zh1D5*-b0$So)ico5{?}YWl52nBK2?#%&=-fpO0_|+e0|6J#8)Qpo@zbVr-Mlga%o6 z1F^EE=gzNJ2x@Y!!6}=O&&itZBn=<1$CNGvbcF#Tj%!#8yrLY25`7oVLHemoM9ZhT zlTZx#(huNP)^b15W7yI=eJ-cA+j5_SaqZ|RL)#7T7(vI#X@x%*(cWe~mq=9G9+1LkMPc;5r^t_H6I?hP+i6(J&Elg#TZ0!5Q{>p#*BM+coiz-}OQ>*Z@WK(| zdqV71a1vfVE|D35KH10z(2XRw!H~L@&m^$!h%tzd2saZdbpwG;v-md)o|nFc7#$7U z2Dn&_?!xkm`5kt4Xve9AAUxh*G$0~;Z_dKQJN>UU5RO?sAPUk%qP!MeiGu6#qQ2bVUrCST&@b@VhEpu?M$2v@5zA0j6zz08 z(lT)6hm2lnxS5v9GJG+&O`0=q-Ytj7sRX(Qs4hjap{+Ik@t*mqUCp6Y(N?ucS%!Z=e%B6$9g(lr-XH& 
zfGx$oSPT#Ps<*i6mOIF{C@>``#Ou>*hsO5c?H4(9o<)s2RO(}v0vDpVu|Wc z%iVKq93kGMV_fbpLwff+SVzbXEywvb-mpN_cS7BG?By$oG`X6ws0ug}h9{-fJILs1 zuf$JLU>_G(Q-jMc+G>X^h9(so%{xPYNtPxE@GV{_=NoB_ijI`>4}@07JJT!3Nk$5OB7wM19AU+1&Mp>KZ#gZ(n!d2HjP+(M#T16VmBxV3^X>~t=5#~7C%GitaR z`8h5RD}2A`4uhK`Y*^vxU+bhmTS>!ISAxrYKU5YdT-Xba!2W$GyBcDUhObP+Iwv|c za=@70&#Q2yU23JZyZ{h7BsbQgfd`_o9YKzExs%#iRi6^)gbs|#GFhOV0Q2z2vAx;7 z!I?mzQG+sTDu3q~r{wz|5elQi1yEhy-JEK<7C$*%C+swAE0mR{7m{_gw_|)%3Se`~ zH|JITg)EooP}(v>z4)6yN#e>;0j0GHJ<7YYuAevSRTLl7ZnWa@_wwpZ zj2H-`p)T_MJ-==Bj440iGOGb^*pEW+1Yv>yxp2dylcGZ-0>8(5t>nn{GOft4>;rFb zo=C*f9PTT3qk-xh@R?ur1_BKs*#7uR%kYOp3xw=EGcYZY$fu~ErCSQd z3pf;HDyb8w|7Y_iCpkxE{6Xqo^$!AM*HuggZa?_QXuK~7cW#$4aa8_$ZQ4)m#75?dM2epkB6X_A26&&{)#$?iUm_ zrV4`?%Q~F{GVge^aNF6oIG_Vlt@UDHUb|>T2vo;;FOfXp{S?CcPGACjrtR)RyFu?GO@}!o^71Q@)7BwOyLOhk+yzu14p53 zWNM~!$_2OzqwjCd^04f(UC{%CjWVFa5U=qim5ITgBqw)Z?hp+s7?!krZZu4|o~pLQ z!QY#CxY7NBtf2ZIXGmxs$p2Qw_4ur5%mE?DG#=K2uSyV$xqxFyoT;!7Ip?1Jw2gT; zhKMc{$q8o#CUUe9`G9a3*Cj#*B7MOLu9tPpYWH?gmMoIL$%iL`7*>xe zjS+Hu$09;`CaK;Ehjdl-n$&M%D;Du=?0#6rN{%OU#%W8}apf5o5#f`6G1FJi$tz|S zNGs>&cM3D>>LFJ7Dn!gov4j_ngiNnAy%;P4xTDtaH9p_6|&&^d@upAmK5YVH`P@hJoP5IJ$%eBne}CkjMLa}Qows>wYq|)WHf(p9W&0Rz8uxU= z!3UbV?*n;c5^ckEk*pnf$vG3>0UD;X?oljegwGGb6&0~$@2G{W{6zGt5$iY0Na~U-=vGY>#Y`$QvNlmn2-&H)~&!9k0ND?=Ji$1 zYZw~6dIiQNBBJTsJci2oV&bEuJO*8IJzo9kb*|B}V3qa$;rQ2|oP zZM{%>)Hof~2x&s<>0HuFGbU@Z^Wwfj|ByoG@`Zrj8uE^)pTNP!p#{6x(L*4vbnEFM z0+OqlWmh>2`8RYF(N613TIm8|3+q(Y-Gl8&N>)c<3WoG2Z;3U-))G`JBz0JOi1Z+y-OX2r*VTqvLXF~bS3%y)q?Zn3 zHe@{^R>0j$u>5Vfpc;g!{`5)*65h(WnI!KGGs%)VEF5)R(wOQiNwYO7;XD;8QFl%I zSvyd>c&Sa+!kC8P@uf^rKA1kj**}uVN_+1jW537c)ERP2^aWO;QGRTL7M(vZY&DG= zdkB_a9q%uI%UxzA5zW#PT2O$4O3k`(6#t4u;3OAJrHfI~{L>H*^NzZv;-bZP?M>|u z@U)1~(Vj4z5Xpib#TN(;>tQaHuUTNoq3`i_5yPJT`+gA|;Rf4q82bJdHA~%RH^SDm zHx^ca`jdCZgYH)e(R@qMOQUrM!L-FT7JFfm10Z;I*94ycSLK`OR$zu&6W}jRaYWzL z_aM@<87%oLFlwJG?if+I^KUO)>APFp_iHpBMq(QHxy_;i!i?3i9o>VK?bRDK{ANMM zYN}2qh6pSU$}iI`reTL#S?{?cDTeL>C~MV(Y%0HM5J}yc4ekcVv4o`JAnOKsfqn1j 
z9-q*~8xdD)u;M!6@F5K?0J})_7WUIhRR(;+)j_jq-K!iQjxIL8kBFH}DKyk$ zkA=bINzQ0cy%HWz@M3tRcPsM^PMhDEHcJ~w8C0yO*O8Cdc{`9gGg+OW5Ep&)-LDH2 z#t7twB@$7dk2@z-ou$G2;SzZ;Eg-6Y4lDU#qj*6hBz1I;O4AzaG2LRi#yCW|kRr$$ zDZW;=uUiSO!fF`YL^7HYmy0`21F*5$U5kiL=Xe6j3e+TYcI^Ge+mKfU-O)~;f+8yV zD?6bLCzFWA+@Pm>>ZQT6;t``9)``Bbb>jL;6w2q=JejFjXwliR^z_({DYH6uxyi{X zK}9oCO;?$2cW|CuR7c*pbPM`NsaP)atErHqZ8IKbFOW*iz&ITsd+tlLp4Sifj%^k= zeoN#MAKEhC2zF{r7$zugyb=eBPB8^2h?*%2uUiEo6LkNb6M85#ppC+^2R@>wW7q`h zl7VwJ-QE-U)`(6r;zmeME|~KQ_HaH-^dQn6na!}}ZJEaI^Su(w+>Jmcs7U00vmx8kys4S8gl0 z6(b_jYt|(})e0jZ4GDc}^HKvvf9VIY9ebWqzD((9SpObck+M0CP41V+<&E@DBATJ5 zHm>!EyFls60e6IXeb#nE^Q6-1r=S=e{QlSZK0na)wffnK3N|B%yXf_8r<#L;SCqG6 zfIj}Pk122o`oW{jP}Af^Z%Y|`d93r9BUF=|*`RI-g~f?5%2wf>uJ_IAY&yz6*|$-O zzDPv8*)Su<#v(4&1PU4c^acK`>z^+IV59Fr=0R#uQrMS!)8S)Z?WaqtC0yuHN82G0}Fj0iTh!u)IC#PKb=H9R$k9CqPlPp-WtY?!E{-)hRaL+f}+k3 zwo$DZYA}Zqyj2`O2!oMynAZ<>0n9>{3Zsx|DYEEha@2=00u3p8wl6{`)sy*MwHUgj z3CNv@00C7CD*1QaP*w8Re#*^>U!$RcZLbg;><`^_@U>s7WVw1hWq-&P|3I^XZ=cea zohSVayg#XX3(FOM>uW}BnjB57H?z11Ndd$Rf8C8y_+G3YXRp0Gg_e+vK5nYs7{bLVs;Ksg$ciF zjkF6Q(JRRk{RO0lO|YtdnxnTB4xkxVWcyaQ3?H$Z+^&T3;A8=QzWVp|?mqlg=>ho) zFJsX#2*8_MYdQ6U7f@CAm6s#xOUaSk(}k3eyJ!?b_+2cu>sv zpcYUu+J%hnb)*$~9hclbS%=4Vgn^P~JdW#V4)_-)ygG1TSz=1`RyI;OmmYTxp(Gel zIDC09_Yar34(^}P-Ohz4?&NDYuF3lplHUh&Li`Mfxc|JQe*{gg|5s7dUE5e_RQVv0 zv||ui_38bs&KR*lni7JlC&zg}yq`GtcP=ih4A3aUtc#p{n2u9*z-VkYbxdvs*mKK? z-`S)QnJ{G7ujgsiB-)DU*JTu>ensHoj~A6WkI0Oi&0_Yiy0julOIF>rUNh7f{)634 zkRq)b5}6@yc-N)5_2gFgKc80cO__WA`UgE9_RwFfH}Cy^%Y(S+&@)BsSTTaME>~3u zycGgF3UW+r>D}lb??u2K(*f15$&Zz1W+S;e5ukPfrG}4_o&8G3$+-< zDAEuM^;9&%0x0`);GQ2}MlqlyiOQ+D&B_xA*-E_#V!dIhrnAQd-5bMcF&H0^Nk5^` zMOQQXP42ZnQ}%^tmJY*bkS%VjA&fhf7O9{;%hVQz{S12Egv&1lU>eY~Wm0tOGc37( zIIDUd-aJ5;EsRi%2U+Zg zj#P4y#tU1j8L9y4$7x)4Y66G;jQFie-Uir(BKV@XE7ZylEA*DDw4im{+s8)?3COf! zc-yrc`d+>@?dl{K<&!hRM{}c#FhI2}_9z5-^rfbKQ^sOgCBfl_fT~WR=;nA(rZPk! 
zs5#ywg;CoWBRX{Zr&}WREDyg1r}_%Tjc9tF*)+<3Gc*PgU%M!KV*k(qlA~FP15bCN zz`5M@0gw9W1~V+`jC?^h7~d(iRQDNAQVQD1%`_~8ARE-D7uVv3+`$}$u24lg{fzxQ;x(iKM->|Y@4L%Ar-%>9@d z?V?+|;wJ1!wPR%{boL5k_`g>-ng~O*HuYnPd)O2q7U}CwD`e3G>j~}n`jTA~b9FlS z{dQsD&XbW(e80;%<|Va$(14t-=)gMA z8Mp(b*~JlYNA65UC_&Dp-5R`|{NMipJ&vj$)P`6``b9>%TYoZ&-1N>Hg?6kTxCSQ} zluPId4@=gs8MYhlwbWKMo;6b7O@Mwq)XhI}(jA`?Tv_h7(KU}{Yqp# zp+T+ju5d4W1##d_hAu)YGb%?&x{F-fiNJc! zCvAQD%@l;?*LVHF^#QbNQvBXO2Ft>qlui_>>gw%^v&)8P<)zYfrugR0ALyd~qJiSQ z=l$0sVd2Bj-hJM|f=gTJ`1W(876mzBpa)yri03(Oh@#6AAnT4k+QCNNR=w??WlAs} zInV&@^*N3A`NOevvtHMc86WA`UT;fdkz?7W_X`VTq!FF^yg1zeQq+Rg#$j`fi>gE! zFBGob*Y^#vJ_&EkI7c+;eGCEINHgt3Rf*beWqRHG4y$oeFPJc{{|Hjl+NsiVHDuC zoft9g$&^z$H+JAEOO_S>de1@;oNJ<@?4(hGCDpLy+(FFJfh4^D)wPP)G=JY*M;V0% zQL9j2AM*v@!Va9#-i=Mwvzb8;RdVKTc;xYJH#=mV_@9>aWT4Kp&4_#BQjS5KoU4H= zVLrTkmp1uXFDOU#?WSBPd(g8P8^^WIPeRyCvc-Q(A8?=Kw2P5>;ns1K z!)G7yb~z+U&&NeG@=P(WF*p+ZBa#8C518%6{<39`QGB3(Ou?qllmaW; zu&uu4tgLYH9x$B(taC^|!aF>Hw9`Rno2?G;K(isjzv0ub%5^!M0^k{m^8xm40aGr9 zv$R+J^9re8hY;!9!;3RS01eyNx#zs{-O7~2Zx3|-Hc5ep#T)@x!ZxdPax-R#u+sIM z+OYBO8K=;lbUW_MwKpfr!O@v^XDQmXgX8B$)U^rgte>u;y0M$cPv`;2T}u=x_7kIeD;AAUpqEdu2~Ch9;0JbmfV$h(85#@LtOa`Tsi zGYlJo(M!oH;LgkT0sI-@p+FZ+lrsWXOi?2GN3`Ied4qoyKqB!|1|sF?hWHJvgP_SC z4TU;nnZx{9^AYl#I?k^YMi^8Fcb1oeBq~>+(Me@(D5DkAl&#He2U1miieAC%02ppy z!7cs@5|AeBUHd-dg<^)hfN=eo`1Av1kl#`j8!&HG2)uX(GBv=f6oBl46ZDv|v`79L z$W(YcgR4O%2)J8U(;BjE>X})%&`i0l)M-B5E zvX2GeoKru(3@jusAvPrrp@(T&IcrGIL_P4L6mkaUQLfoFG_dNj(q!)fJh|ZE$|S9` zvo|dlsLzo>+7lpEn>`L9K)emcAZ=as*uF^Vfmy%go7iEJ=PSQ)ZnaTpLh-@*1+)~- z^9GK?eK3*6{u(hBIiU+cLEk9hTnsA__AHmXv)e~da&^74T&RO}NH zWII_xQI103oyr?W>MQpmpAKeCUM$u`tMupnSJCoh!KE*f3KUm!pyWT8ib#BgLd!<@+xGLb$JO2SX#Mg#+pU{Cb*eM8;Sq9IZ}itv9o&?LMc zH)VTm%xZI*#tMdd_L8S%TEPJZ>1H13t0ZuYv^8{AzXQ$3H#={31-{-{GAmniig1`Q zO~=8*o^-1?lczhPU!1soni*YEB+631dRvpvk%wIPI|^s24Y!?goN^Y;lO;wsoGW}Z zFGUp%wE;BTzDa4OJhFuK5aOLwF34&=)zr?oABd-Kjq>P0*{Hh*^=_1L*pR^-CP-Xy 
zxavp`wr8}jJ2W%KhToo~1L`{~KR7ppP_>lEO=o(8P^!9%+tkLyUu;OP>_{Jov;Zm5x|C{g!Cvuh**$USj@F;`v$@D~#?Ul04gF9|>Hqkq-7ZUcb_D?^EI|uQ9aJI?ms`Xys zt}i;Toz&b^@BH)F)*Uz*d;7-#L2(XYNK4KewQKj~nzj_adzhFYp%in?Xye|DjvG0x zAEla)2EowIeiq38miKS$ z4)!Mi$r3zL);aYsq;01&v8hzZ!x~N>P(7G3`))aTO8gVDazWV}n*sMe8xlZ0b z`UEf0;U6#4#g$3gRVx2}$Mk9sk<-x1+q(}b>>JB#fenh`t1H*!h!cJ9$DY=Xd(y&u zF$!eFTij~~sZv#pQ?BKJ*dHSB>^pF6Drlo}w8pbGiv#MD7K6^@H!Hc?wu~9{QW$?H zlKkigz>i<=N0L%p>1c}bCO(7w@MQ^Jj$}s23pAZdz(-vM!{TF6=V8Ap*Dtd20g@py zR5AdR$Tu^NlQ#~2W7~i|ybN%5?j5Wn=bIA9PSNw8M|SwD$>kUvTNOq#W0P4jL7-B5 zP>P%>;M1Bz_vKDv+A>V-Dl!*Uv$&go8q2^)XPjnfOmPk3!*;MA-hN6pL#1KDUXyPq z*^dVAVvm3}X)5}-y%vS;REyo3_PWRGyzE+i4S@jayLxhWm-3WCS#BR-NGIBkxH4Jk zE-X@LojIx{wYf(lw87C2$#E?C9I5H8X{G>%b`3`KY`iu1EO4dKID)z;lLIZ2C!k$0 zizBd1_NxJjbpxItlJg3fley|(p0vH~e(^cv;vbrQn#ryAO_;=T#m}w|6bgjNA&YQ7 zP-44`oNOe`Q~MarO(kyM=#N{Uk)nIV( zFlAJMT=715C-SjTUJrOr0D^QpR({Dt&@R*ev6W8QAwzN9*c#9G;AJcP0vQ)X{nw_u zZ2L06{KKKkCR%O(q?!=%iKt)jtP2=e336bgLt4XJiWQ;J=NN-xrzurmUG=<)>;$b$ zrrs6UOP^%CI8X`jXmSX{%PmWwFRKpwLa3KFFKml@MAbp#LWcC8HBw=2=HpvdmNc^T zm!V&MyYi_u%KT3n+209qV;{$f_)U(~=*_6cDAiKFhyWr3!wVQE=$se-vok)ugzdTO z6E6l;Ch@7RVaUakt*iM;)zcu<$oZLcgo%TpO&oA)hy1t;PXl#>g5~Kq|b|7EO2}k!Xgsn5pUuT)-A0rlwE*f z|7@(p56i5T#K3HgAXn!)k_y<~UHqJHd5!)8(64jS{*8+V$`7P(o|H+G_Bz(W1vz-z z(P*!ongZJC_3n@g`tB%7J=$3M<%L!b9c#iO<6NoWa$RmFoky$GA?H5t%O@z(%5yE} zhd_fVlu!VI)2x;N@PYlUYha``5Iu!pj7O9lVlz&g@)Z>6`~R?YPfdboO#^1jwr$(C zZQHhO+qP}nw!3Vz%btE?;)|J!^D}nt%=K_&qSw5#h}ov-YlI19o)S~vqV+AbdV+Y$Qd_*!hSz;3>gT_@jy{UC`H^Aj51Qq z@5a=u@}?yaGj&N_C!|s@9*LV0-Jz(^`s@1-h20DTN z&)61kWehpl14!rtrgVb=xOA`|35R7j;bub-##<vLKHk1dSQ@S^_sBr4>1ldAv85= zJY#K*SR2m(^zE`5N6|T|cJiO46Xkng^=#pp9~YQsMAeh=Yd_8V$tBRkA_DCegysWA zpTFWna`XA$?0p3U6HGud>`~$KXA0KluVtTa#_GW1SDiu0m=DA+eh(QAbAwexqIuOA zTuP6uhXGKv`Kco>wPSz-IoktdGgRopWo1;O;y!Xe3e~u4{4PBy0nlFNu!5P&{XJSOs|98TuX?zjQuhM$OEEBI zY$TnE0k<$0nh3qf^^x<@ z`N%;>VzgusDYcw`*d!~1A$l~y1~*wPEWaOo@cE>X)caqh?P@vE*Ux<~NyUnk6N>0q z?E_G-D}^hIOd^9pN)kok7e%SpPE|OtBLckDv)c+LUUL3m9wD~XUI4c^oP?O8)T}Ob 
zXEY$1LR0BRN8nnhY_fAhZ@Mw%tC5{D#r5&Fx%e2MssJgbI)^J76e|$;6sIh4B`Xtg;Z~l9S^tT~ z8z;3p`V_Q{m31B8QkTG@;6xXE-b=IWo?C$9zx)Ow{1Eq;`;1!N%<& zrT3uyl2~^#Gwgk;=+k$DH3iCdMy)&tDEgFMwJw?Wh6A zF_JRlxI;Jo zBBce2V87V_>4kQ~j$1s#kcb9x^}7)cZRxU2;N$IgfO6;XtGE^nQ4CYpw=BDM|K_{f zMm;q;J~OM+w_3Q_MD$L3m)l*r&8iZ%m3h|~jr0X;pY+*4_E-92W)3irO5mrCV5zXd z3E_Ph4|;=B&FV{06#4vF06Du>h&b!(oZYcNR*2JO_q9DSfW^SBK=LIhcn#Ho=l?Xo*L`<%J z=;mS7WDQ`NCa2w2qnK^MeGU}K430!;4L!0Ruf44@ot1ptx#CzJ{k`jo|onkM3+ zywESrjW!|;M~!;yyB@GMoOFfeh*^|hP0Y_`H?+l3PE&FvRIC`nqoh5n<=icK*I7)?OA5TXB7awQkrqr+FrsPDZ)9elL6ZbDU|jfVO;T{t|9=|nu)v+F`Sg5jJ1>}H>s zyz8C5_wR*>gzp^Ou$%a(0^WyW2VM?q++gGB<`cn!Yz+Q^%(^Pscf&7c=Ali20|w#a z3;{0AddEQNT>{_K`G<3oGLAHgsx}sX2pmC#SB&7(tEjp4NI(D`vW%>GwE0n6l?8xGW1H0gr`KXirBU5oaFT)xIqEp0y^_ z&cR4O_6PvU*W+d=N4gmED(9SgJ_PXKnXs(I7I5Xq(MOy&Lc@m^chFGRP1QBj38g5T zo`Q{}%5M*@2%w0-%ft&jdmyjj>ns{p433FH^9dMDGxI1}U1X zT%yoibf*v@9_s~>wHGGlk#kP7x0aRx=6%1I-avk{k5ATRyG=p^6j&c#+8XW!mRdV50?7^gX>lSfF zwyciBTjriZYrr2I^2?bu{uq5j7@>ZqC6^{h->I1mp1z0v+T_`nFfzn`GYA} zgaX?D-}?6Gq;+U3((&gqd`Pr%J->4{0macon?^kOpF8-8w$pu|QxXjc4Fu#!0o9$j zDl>s;0Zeq&P6wn=Zm{i&Tnc2)nOteHjT741S;7Z{;bI7d*Sa46%ErhK&1FxoyB?YC z4-ny7*@^VmLtrDwJJ9K*I2uEHT*n7>f-kbzih##wt-)ZcAybN0WLk+#meLPpleY6$ z!K6#5V&ive_dUYkVJi(sf{0IoP8^gTIpt|(}eS8DX8!&_cbZMkHv{jmwHF2|sw#o&laK};@XsxCem z7?Wp2?NFncBUbvVICY$UMYX;_7_N>mrm}hIPX0udE-7}w&n}M+p75{8s}IzGg16cT zoGb+vdah=1%NI!rYTHM!Lc;m$I=H~7c#sTsHFDQm*ij!mhxF0l6(jLGvdGMl0YHOT zi8T%3D)cF?u%k>&r&KdMW>5$>zs66ae8T!+n1^ZykC?bqzPpPuAVr8k=Z@jfa~G@* zcloIwY*#|xrhJCVzm&EA!T3!@R0K;SRE~a605*!$2X)U3pK=3v$*@R^Yj?U_!`@(D zr1?b6_<;zH&B|ZK;a*>%@0gk9ufItQmG5uMTDEzz5(wv0rrWsbs2TDv4SCtWF2kJ> zsB6p5c2kJyfYDG13u?!tMzff*o2DqN38pqWl;<2njqqtR&>zB>M(?X&n|Nu9?mw`{ zdU;?-D(0$J#CW#qn#fdBs>;(bep?9tdVvwD;C68TF~Ae3Pgpg-Wzwj^p1Km5Y+N5RvfUsgx7qFFL82O#p%b`_5o+3Ip&ef7};~Oe;B4fu`9%$N5e})5= ztJBuN?xaFii*OC;lm^)Bxeh6fh^_>s*w_9FS5aB0DboWW^?|-DyyNdMdar_Hra1KX zziv_-@Pg?#CP7&LBO&Df;+2j6^o2|VeH+f*h{wYb%OQY4&wAa0sfKf{LQ%6(Tiy($ 
z1YFgcd@5Peb5l%%I(1lvg~#i{VvBpAGFf)bPDB7FM5*%94p!qD{1?CNcw#AC_gOb* zpn6mfq95%>|IMM$(OPzR!=m?u6Fk-JWcIg)9X>`x_fpPv4S@{^>ZaXQi0p~zLKUZX z$lE6|AgERp5$^)Ku5SnR(==5-@ftQX*N2y&Pr=_$4T1^n}WdT6~`*?czPe1Cxtd&W(&5+m;UyjT1#c&}9Pg`TI(eD;7H z(_644(3aM~llNUb?&vhl^?*EEyetUPoQY7w@_BbpYTSxNZ z4|K0atFyIav3Ei9G3c>3l100|i#~~52`K4$Jdpt;^c@YI{911tuL}^^pXej;dQU+p zA?ggqC*hIUg_DM+5$ZlIa3%fSyPHq(CANn>iMUDxSjnvLfm#&dfR*=NCl3o9nNj(J z!4YP9(C2!TGg!7G-`)fWtt#fz?Bl24Nc`tf(wegw%AmY;YnQZ!Khn^;KOwB7nSGR_ zG9rNZHv`CmW7E8JG%LRu+y0)Dkuw8fQVt99f5)cw)GV z)lLAem#da%eQ(%7vZZB9O*lR+>G62X>k}csio?BzZ1-&VWW9`{<2u2fyD(8soy=jX zN^PC*8NZ>FG2JyD-35hSUqK@GZHcU~1INvA(%Kpp#d0Qm2JU5`_=WE+)=5-SUIdg* z1)WM-;|zt&`4vvK$< zZ4iPgN7tjq&ifA?ZHlTYRe zr3*~tQUhZl5bQNU|E+EAPWR%x;dIuU0w*BW%IFhi*cE(X#23RfL_ltk5?e0SC#yZ? z4JY7>scfe3tG+5T40ipfgHmBKVf(!6Js1;zlv)Ja&977-FHg0893|2_z=^1Ph2c0K zh9oAJHRf&0O!|Wi`Ti{V@z%~21}%;{4@Ypl`?C3Lf}2$i_@gi&W=)XZuY%-$xD@7d zbjP)rGJ}(6Kl_TYyxUO>zRM5Lcs5^L*3b$i`h#KT7s33!YbFA4;Cqe{($4ZFpTuQi z{4R*40!VHg#_21}s+0c;u4LXBisxgnq?`RoOv*IV0tCR6h%Y_wEo;bOMpFc={oYhZ zoR{5kpH#zV3#!1GPt6W6X9 z3uH8SS)gTeF8AnQX0o|JCpK{5AgD2%&OdeTo3;)>bg)`+3g!0oixnmr|0G$r#jRz+ zp}bd~#+O^Q`ANO1qKWd9N^X?fmv;TIAioer{Cy88ehD^P z1gu?P=TK#RPyi^XkAcLt8m{m76hVSCMAtAT<*2YW!kq1j-%{A>rZ4X~`ucWea4sH31p{tt)RmPyW>wS*c zSf5nWXWMB-i0E(VZQus}exo%F*7mp$aJ2I1X#SHjO?*z zi~42!Tma-U>`!AB&|*x9SmepZ2sCCc_<-P&+yW-ZVrS7{F34Syn!-ys4cL8jWeRal zI+`QCBt4^#_3_ctm5QO&{Dd^fgdd2lkS>k+S}YW(d=$U^2SZ5O^`RZpwRve0%I^U2VK$r=Yzek{|$Cq%fE%10Xk3SgPhGYtte70i46ygl?OkA z<+**BNzYnA_&`R?;m?vkEu$twR)5vfRN0jsqxOaL|6G|Fq7T@BH|NKMK+YmD{{KdM z$9_lGSEP;6(={Le9>a=V#s(wzJuEg4+E2w;1_)vq7d^zivh(~vaR6z$ccZ{tN9Sk% zi?LrNlL%M)glfB*Q}X~&edY78dCP9NIl1X?4Up}NR3!n(#cJaebWyfy<2*G<(28km zu(BX49HKU?>h6A(bdOqXV+9!Iqw1J#8+(%w+t%;br$?h8lIf85|Fz}|kxCQ1dX$#z zHgbA+v%z|fu~B>x#@&DWX&Z*b`kcBQ_~hqIn8qZXS6rTDj0e-oP5t^=tYP6{+Mzp7y8W*V^(zz}|;y<#Z>dF|0rLa2gUZPJkKlB%DR`_qn zC7`J|(1#N!dmAN9!)KMEzQxfZ*TXrO|9LI06P;!ah-b==a~8F5e-Rr`xqLa9#g`i{ z7JBFK2p}^M-1;3jD(P$|M?jHWUG0No>wt@nfV6RbPhAB<6q)1_|ESW`nwNnDq`Ac) 
zJs?sz*efnq4rL3aw(7Kuy^&d*iF~d|HJE+MEqF}Mk|F}1e#e>ilPSw{&!Y#XH=y~J zgq6FKrdQJv%fg78w%zRhQmY63YS16XE&>W1RUF-J)~+atzrz#B6~YVmI!Di*D6xZx0qDdffk#+E2}Hsjyp=Ew&%heF zvH^z0!w)-+ugxqMaF}VwLmgZi#+*2b7H#Mp3*$iPGMgc$% zj+H@%OLL)*tIP&v=a$EWcyu0>Bnqlu-m2`2qNw#dO}UpdDEx%8;DUzq7Wr_~)fyqP zU8#_jqD{zVoWKBG+kNM3V>O}CiQe^XF2#iY3gju+ZT(~w2D)(=-HQS{oGLUu`J-dO zk^dN~=g??UN1^b5bG7_xEMmQ0Z*@ERSn)C zY3c@0=dn%@GO46;`EF{;o3(kvQYkhLLS<9ztwoPJ0Cif*e#PvuVVn|>%Aw~B#Xg?5 zy#Gd8D*Ry4SoV)uI#*0_3*J{A*H!Ql^HcEzFp8uhmq2$H3Oh=zJpnvf$;6dROm+Bs za&PN|0u0Jepm83daVL-YXxP0#h5@bU!5(4rG2njZAx;0&?Xfx@wWK*ad%>pC9bPFx zr2&%fn39F{(~Eqn{*iaxFYp?lpU@YkA5b5cLIV{>2-r=ugKbezT8a=kf%6k_;%g!HR&Q@m@I`u;6-({CR<(Vtv_-}o_K-`OU1P47Y?)3# zRXgQgZlKj~FwERnHo9`qnxg|ZPv|6usjDbR-w>ub3PUDE&h#0?E z6fC19>)`A6fV09x7hAZ5Gz1jL25-<%g-FCgS^%ooYxf1SMxrc-w%5{ad_OfvtjNq+>#vRZ=hk>yLwkk1T=1lbVj=&1&q8F4U zf~>1*M%qfQu$e%WpYA)tbF1is$~+3mp*3LsxBp+v4N=)UV5*+1;Uy$3Vx)M3XI)EE zSiV`(V@|j+KtF&{dwQ^Cz}c_ch~Kk$+<5NQb|GKOlalXE?8^7ddVd+n9<%6SSimZY zC%7;Lyl0Q3OIq-DIpizBj}##is2A$o?>`EPl;^=K%}vz5{3R*RpC23&hUtZKtCX}Y zo19y|y1Race7mpfEl-!~Y_W}xY$j}Jvj-zq3D1)>7WPQAn$3?exjt2nk>~(DM0|p% zC~6+f2b*L*-&xKXrxoWUxma;c8K;@2V6yWhw>#=Ewnmt6oy*}e@Wwe&ax<{!;?*~r z7AmWPDOB z7n~jNB`lTbgCSYGr+FYoiCP_WJwz}8pW15|Vu0LlzJ=9s*xa&BH7(#po1Q>5@==i| zb-g3Yfo?rBhbp}=6Id_XdnIo_uWWH&MRYqMl$dNUqR^)dRT`sxL22gLjYxm%h>m)M z)RXx$>`P-1>@u3v4k?+k%>&AKSJZ%Z7KULnscy^e+MnMd|0>AL=C-uz~g}Xv#{sn1RSZ#u}6zXPW#f>zoM5fWR2AO zu)RM~+>)yZ4~xr!wh}H?(_D_dq1d4B_zot0VnzrMcY$Q!<*DYDflH&2<07 zkr~)D7FUdVh0xOp^UPC zbTz5shBFiQl{i^!oJu7Tb+Ft}vB=ssJ>?EkCRU(sQ6_2JX3BFu`Jnw9?61bX8KFdi z`X3ab!`0Fslah9M!mXM3G)u3kacw-yzHE2o(5^Co^6 zZH=Dr+;*0|ah1Qwy%8S0lcIAO)by!U*&tSUTmA8sYFns>9-aFb7-`o-nsc3{btLaMr!60SjBX~q!iYTY0D@d(aiw&W(J>7A zl3DC(fr9zo0k`SS^LLn$nnPKYUgpT-*mTQLl@Ihmh*&|WtDG7h95K{*hFGum&#Ya7 zs8OWI0b6DX6H2l?ppsuZX?f9}IN%q3$oh{Yp-Gdze`!Y$4szhXN|tj=?rKXf1}!V= zfXW%J`&HFc=%?hfMi$GPW~$i>uJU@Qyrcmf*tdS4Bqj^O>}bjnVgJ(9b+*KjjM9Hi ziOvdCqTknxEZ>_W@sKxxML6A&+xAPx(1^-U1n|qma|al*BF7`x(=k*f;#yTo1C~7P 
zMZgLAAa40XwKTm=!T)tbH!#?J@;N1UxZfY-4F{IS5t*ps!~4Auqw=yfJe1vx_ZxQ5 zGCCKF+`XpO)b|{P?z{08wcz?`;wgeqkQAKiP)E!ah=cAf^Az`LLZ@BS+}=ExXEcC~ zd~DbH9QX%XoEd&fS<~^Z$1Y`va}{S)ZV$RJB|*3;vdz7}Co>~^c5f>bp~8TFrp+mw z31cG>V0i8abl@27n@8YGAk+ky#8Nfk9l2Kc3Ae@)q;91F_v8^kk!ZM+x)br2fed*B z0_b3w?qJ`3>}lmF0YZ(36Qv&iuo&hRP@pI|fLY>n1>57uWEE1ml( z{ZA``@PLa0M=bi1h^OmpGB@B*Huh<7d1O{)XG(oaTA6Y;s?wiWj+EqHX=U{G_ZS0W zu3OXRzv!`}lB(#mp=sILp~VWtQXxx!A}xOOY?aZQ3n8<_YQORqa+n|CC3Bv)PMq4h zvdM=huMvhC3Dc}CD*0*_d$e1443&JkL(Bz4Nxh#~6UF&M`HzZ9xP6Avw}fT_KQp`? zITa!#>UtICc z@CFn#S#@?tKh7>*4Af-jyyU}~AXBm(!aYN9cjP1DV_bmn2OIAccp)en-S+IfM9+^_ zyCaxPi;li7s(Ho{_Mo4+TG&u;!{SQd%Xn%4GZUHHjv)^@s55cnrK6`@elJI(y#M%K zb#uR35{i;;&vxeS-W)@OQkD+&k2N{okL8%R9rWQ$zmXAc1qI-jHnRT*{)65L?}fLq zV6`J2_uule^x=ni0lI!0wds*2agzmwfdey=w!Z0EK`LJ>_~cR_&clAFtP5Ds+*D_u zXu70n=TR)vZEleY=U6cE zzn`z^hH5FIFeD*MpTwCoPS(3itxlJJ`NJ1mj(BI&e9`&`l4ejmWz74Jl@E)C(xk&y)8+z^QfJ>My*)=Nn6R6l#upy_k3EvubLMR#6qpu&O zzn*Y`#xMD8LU(YcjebV!oJ7r5MMlAMw-lG5E%YQLSxKn`_x%G4I|udZYSUXkH@@=apQXH@jfcPm zMM&<7cb}#nfWVf)u1@a4WtrzrKQ#*4+@cnmv8^3*n_dLJS?gsQ6&!l{+K1Hxp>U73 zr4W3*xlp{7vH{3jHb;9Z9sJXqo_htM6QU`Ab&M!8!HrGwXELmK)je!+YRBBjL2`Jj zBd|OINxX#$BgvNea290TXz^wT8>$0yc&v01PvW?-o2)=;0yK6u;9~j^b z;g`)k`2#R?rqQ6SC-7D*ZOzcKw@sp|E^Kz0Fz|>I zOpmD1>zc__fWVJSsa>@_ea=>flD$yn7qG%Gk@0Puq=dCI4IQXx8tT?#5=em8)o4L# zZOjDid}R=H+ciiHg+zdzLz<19GGLD%0&|C2-bmM<&n5=KouP7mVRqQm6zZz$JjJa z1M4y(kf=zcL@G|TYl?T4z@9vG&)~ytJX95OR$U_R9U=atMtEK35;TuEAsV>#>g0@F zLu}<bxfJ*s%ZBAnWi%^T3<-CX6ULe(DSi7A|TlvEz&~13D9T5-v2(iC*c@1wclAPu0@(T3mgZId8Lza}IM z(|B;`-4tb2eHM`lNk!y$r(@n2a?630=&ZN5ngw zsdk0GUel5eIGDp>=*0fgj$t{Pu&V9?c$>HvD6{;l2u>lr$IN5j-1ih9xDI%m&-lw^ zVCkz=0;n!N_BPl=mfu4)$i#lQ0@H!!C3F-dkJ-+Z^SR1Ld0}8BGIbge{7@&iaWmYo zdb7fcS<4dK0n-m!^eL|VyCTzosJML#kRZy|;Gh0la7&$d){{sB@tZv}>y!KgHd`-( z2cMmwH#|I&M5k;PQVUTJ7pI~7plO+py7t~hb9Fv_LfS;aHwZ+ItN*#cI_agyYe-H= z)kOLf>MPocZYsOoO6>`(9zGR9pgf3qbB9m)w`zj%NtNyp$a~b-$~jus^T|E)NbxMX zic|#?nV3mMg+(4s7v*Qea~2X9mM1${vN!agKokB3-?KVoCGLw*irA0+$pqfF&e+KC 
zcF5*PqZ}#nItHgocP{*dL!C{o@0mYw9$IV%|u`R0C z`LS>rUY~-qfgp6^{xP|+!7TrxsX1hDP%(?(8l%nYHKG6y=bj*!=Ai1`5^OpTEGo)R zI7tCNEXEnkhk1|BPqPJ0a|8E*?F>@r$Z1$?9JcBV*AN&rBt@fAnRPVVD0wcF9i91g z!kpyLF}gv1Mc&}J7G7hZc2rTi=>Z)5fcLmn8upZb94eIi$gQNG_&5i8!s zlNFl8X#&UtW6S|e%g%l2HH)s$e8>F$FT2dwdVxh5db!GVv|w?vBIFMCEQgLmzhfsRR0BWrV1!Ww{FI~xzKG|SJ$1g|VRe~H^d1a|H z$q>!d?+GV~u7;R;fqSkjLdB9D)!y9Oqbdl{bp2{~UIA8#O7DpG+j!66K<9vkFvN*w z3@^ztRn24WN-&xed|Ho>RuG@YhV;<%G*|NkmX$Z3Owim@?(kDp`7t{elqL#xzQcWY z|J>r%k6U$fL{Ok6P4s#7HFEU0fk@>S|JQ*Tm%hbS5=*h*$0<0~PfD{}mMkVN4a<)7 ztUNr(syV00t9RT=p3|qELnoj!qQs%6*Ampw>G31eiXrk0Q};H>uy(HVk12`XS1qCV znsrWp#Yf@KM8IMIvl||G&wzc>FOlWTYOZeZX-)V*m&O|6P!dRt(-2b5k~l6?num;m ztVB^`C2%-Obo{{*x40T^pwkOhYg3$n{U0bpQi4TNtg5epkD=ESO*eV!pA-@^zCbwn zOWd0C#9!G56}evXC_s{Qwwf!Xe|kuYueevaq2Jys>KznBpx>I%KYR=l6H#QeSKaZ2 z+z`Y}Nt=;5{ahICbXqc2Y@>iee}s)^3`gM=;CNw7#-pMGMKtv_3?7w(jcb_K0g4K{ z9EO#4TJEQ{=gsGv%!>p}U(B{&r$n9Nf`sY|=GS47a&F%b`ZKPt_Di{~MsIR_#rqZj z3_B}TOM~;}XuKs~l027Hm#^VsX8YYd>g1_eVa`5;WG8YvFro!;q;iFWwR?*Fd|2+NTRjkyfY~YN+V%k`DIthOI>SOQ)`X!ji|4(7>|YI-*659%^C!gD35sPP|jy zq{tKL8txUSRy)_&)=-PBI?G-yXSP3!0Jl~j28VTt)V~H>`^QM(8O-=B3y)8!7wo=} zqrff@Nyklrwit^s8?emwspH~01;y5;53(6WStx_y1Y#>>z!FWhZM_$*h;R|*Jd}_N zR~deJVoc55CA&|;24oF5IA~xr%OPupLiC@G-9jxo3bItndoKf+jLqn~aCpa-O0ZjZ zwY(=={Ve~gAL!pSB`2IR!8=GZX#T+`Z=9AN3|*+suz%1h9!tZ-q#wQpJsuRmegV+- zBg$675lE0e^k)crbDfKzNpn>xcSBB_JHD|W;HY(g zA=o>Ru;wIlAf2J3R&*&Tme-yS=_CsB zF^@xr{SG>c7nle^3f!hx?i(U?c79FSafikX5b3}i@G0*(!&H>=Hp1`3idqtU2$K+; zty|G-=Sn=;6UNxJqJ7)_884TkBG`VF+hjN>p!D`6`*=RJaMYyG=(NTDT2#NsyL)cH zKJd$z+YItSQirRfd9(OzG=_!czgWr-@60D=`pWHZifK8Kt1RW^B{vhWsbvi}BD5B)?h4hcM)* z6o>QoXZiZmioH8?D!TUR@~)1@5(#nH;%iX&9Sd~<;UY#3xlJa&Ej}py9!wynh_$D?T{T`{Tn!X8Lr#}VIj7OAQ~7Y zrWw}Timz2r8F;)!xRrzoRZ_8{!)~!o)|y$iFnRa*u(J%lZ=91#;%8^Szoy2pljUo< zpYpvI13^F7<3X(!pI~J9)a&KP8v&E%d{s-3OgWJUH&`Mr-rJ{7^Jdd+A7>PFg+3Yv zJ_P%{`~&8+2NGwFqCJ~N$|JIYbU{*ow-Y5ZV>8~aahP&e#Mo20yqMr_FR+M|@O6LD z8v}Ru?s2+|Xn>ESqv&ZxCXn~Ot^04r({hXnPEq@-e1|na%zmCBGm~5GFzfw{_xK(w 
zWn+>{##TtL2JxJ)HG;?}AM}Eo*+*sX;nTb=*EWY`{y}o1j)E1fn#c)OU~M}w0%qH9 zbl-^qs4Eq0zCm|^(hIzBkkB1DU{35ox{WceJ;DedrVMt8JY%})VI&i}GOxV4RpVAR zUXGq$laiuLU@()&D8uo>S=lyE)63ILc;lMIe%LOPz1vy}MyDm@bS&^~OUYHh|$sEzL|AiuusT)TCk)Z&J5?6b1nfV-izg&lvyz40sKlh(YAnTTN%e$aWGZ>sk z45oeUC@Esvln-=vLfV|e^r1s-qRI6lxS=A)`RBr(Q z)xHqk3i z0U@@)zwi6QC|#2Wgw6L&R|-Y0)b4V|Kn-7ZcoR)gkDMSRy}H-M0l?vhF!Snpkzb4T zv`1iLIM`=F<90Y!F%m>SB_dc}40% z?HH3J>>@cRvIi}b>jP}q5Ip!OmfpEL8n_aE`2J$63!G44oc$OfnFas$j;`y3$YvEJDl zPV&BO`$d3%XgR){kU%3dwtyO7sL%6Jj35FWhuvg_j7r4)rg?(b2W62cp3--n+y$7Z zE=xG_uVNSNmYyD}q!e0NWLAgCcKUfNRX>2@%=W%uH(xjpVV?dfA8tTVTB)4RuWcW+ z0a7Lm)U1#1cuJz+?w~e>4#A3VgzBC9x5h<-!TNH;P4!m9rQ2GDn=`4jae0TKrP8jv z{$37=K9xB5>N-HSURA*bQEfr)6<0maE~Ow~wv)%Y@&wpMn5;W8f?M|$a8DJRvt>sa zh^ajQGU9q<%MSc_A$1x7XVNtXG=DcLJ|{Z>fkpzBCn5Hm4KYb1&DAlqBYQl;RZvdK zzq3YuwqQokL586xJE3_#a}d2{Xeo*EeM$>Jt&%&0ohx9EkszqsL(9wY-zbl4bG9ox zDEr3H;>~#!Q6tAJmgTWP$6gM>lP+q&oc((EA!rrxUPpo)zztq86N~mj&+y0=FtGY4 zvwOapK@+?`d2Kz)8HVlPLpSZtiK37* z1_+BHtM@=I3NQ*!Zp*s(N5}UqD0@7>FpiR$zEy*wV%^k9s}CqU*-58g75L&-icWeqO2_ZP8GQ}Yn-v# zbOZ2y3EJ=RoqN!klYR6-LIFm!1$$u1h42@MA9{Dm+mrJo936m;wYa$>NRVs%?YW`M zmSY83P{Qgpo+5MyH<;CKO%#p+BYqiWLA&ercxJMr^Dco}n){#SdIZk{MuU<%dqBlG>eJmL8h=p%nM{(@@PR@sc1< z598cvUrJL-^O`Yh$HBd-1$ltDD1u5RTWW<)N4_~J(FUm!GD%dPS%$ZA>Vwm8h|qar zFVvx&MPCl-I2ZdMI6LVK#FPL174HWLUfIweSD5z?FPtwx-NYlaUe(=wZ1R?f?DQ)pSc7WK3<`oia{qj~#FeC#lju$i~q-4eK~+|5!!(;Obq<+M+2pQlBm`YgX;H0hx zs$AFV=sPhW7`xKlHWFBcJSWGPq*o&MWF7O#pO}l} zFx&SFSU{T-3O<=|=bjq!7v-rG+y8dnLTI!^EJe_H{rXQmm^e|BSGsywD|pYed8F|J zgWUEti4s{g&d=0as7OjmiID6#qH|=Hr4%JKNX7y$Xg0vitM{ zC3Zi{Hn`+CAtJ@AO?yzHWy3VZ^TooFf zn|ZWDcTYLJB-~2Q%dtbw2(z=ZqACufOrseFSyyMXe(NuGS%!KhtT_O~PQ4syMte}u zFi(3^XlJB>@$Kj`KanAPmGm^;6=gu!(I8Qt_8=*Fop&8^Af-prmJr(Bls71^kVGp_ zlG6yR4hc=3U-@}hWo+U0^g8#UHJ;x;xv_#Vzqrj`?o@L3Y06R55Xsj2Z?J zZ5+elU$8LcuhzqmlU*A*aemkO?+tamCX=!q3Y4mAkbLn?Wa)j6cehU<98LSi|Ck=8 zm>Uic=^5_lT`98Xa5;_M-eUHd`CRDXvPa*A;gDAKhi-cRCJsS|{67F>jF{0HO9e^WZJq^mwcz&QId+q()WO_V7 
z{Xap#ub>y^NNX~lOWxcLYG{=cX|@$L(9v<-sw;AKDc)zed6!E7cj{FwJ^9qLvav;@ zO7JQpTze0PdK6;@9fdNIP7yinf3=zDRY|iHi~^m7vl4{b+Y>!@Hz(V)8=E+RGx=mw zWRU|Vb;o8n?(BMu?GI7~Pj>ASnBpO{3W!k764qw zLqM5i=j=j3UM!SS3fxsvRmEtW72}UwTWAqJtBOVxbhuxzz@KT{F+2^J{ng3|FX9oL z=o$vAZJVZ+rO|6L2-n;xPqJzZfM=}v;rGGZPoC-{5W*J;I!c`RhY4+t^o`=@)0gl! z)}8weuu=8t+dW7WY-LdPD%XhDrEP-YO7Un0Smv8)eYmt6cF;RXOC|T|$U%9!n)r0w zY*04}$lm;-%&UjF0D+O72U_zs*4r_L*B3a7QQ?116ytd^^{?rj;bBkOT1siqnhjz` z*X9lA7+6$wU5;EMm{x@Mvkg(%%Dzk)^-6;1k4?KM?clW3y9! z@QGH#Wurdq%I7pMF~VO{J3F!yBA2_S##|W4@$U5Cj!9 z+JGh22-HW=z3|LW(@5tv3RM?`Tt~{Fav%7LOIJrGh)90~qXlM21K9ENGtf{`Dxn)h zkRMbHsP4s~pZgAz_O^RX4G8Fxop2NhhPBQRt!dAmz1{7FK@#;mI3CpjY4^}ga(p-S z*MVegt#pgdjhpG`ag8UTo!3bg)65i?zns)o)8e3|`UmqEC{STb=)$zLzBAj!*VY@W zAoTFyRM2|-{bj4AnLhhhIOaSBu@P48M#J%iz!et1@e}ha z-tZtQ)W;H0a_1jupg`eMlBrla(pHKDjZc;T*PMLy^z)q$_qb$^Gf;(2a;Rp5IFTk@ zaCN)wIjwKNb)>S##oO;*F#U#1db>?^Wmr++xsCj#((9M?1n{smV)Y3>5hpACW-9&7 zyn+c?S!wGsC6$)T272jaWO14M{OEK+5IJi1Ig)d^)F^TN4L=_$xRG@Gm4Z)M#A?XB z3J)-rtO;KEHET7`1R^#2nj+VenNuzYW?^Kw?9n4u;Xvr2AZw0%5ME73+F?YXwRv%N z;LegK4TmYB(q_JkNTygB3f$H}o}mDgB4uf9qj5aP3|k-EA2-EbDKx*ChU9A3QzABM zyik08@NxDWV`1!bB+m_OYAtb^;Ve_TJ+Wv6d&r|TTe-#&=KlKQtHv~9R|M@ZUq+&T z43YG#sqT9~azATu9{9o%@gHa0UZL6qr*yW%fhcK(;E)8}6pu5BnF8|~SdLGiMA5gi z%r0f0qD7So!3vB@hn&70Jk&n>BtLAvT*Du=u+mFd5h^feA9TMZQv0%PaSaV8RRdSN zv~Z&YDmGQs-QsC%OEkTFf#KVfq<^mRxQIVjjipll5MI+R<-BO0 zP%^@3acH-npb?0$Ta5YXE+VwBSEUyB&YU-bPu%accRYQc*x1~;+~f#lP4xUa8=zQ{mItmWG);7^VxnfHeU2fnnb65_Ror`c~8wo=AMF&Jbp_( zjcyIH5z{D|WsgUJM3b4Y=o+-Yz&gM7JsT)S>gv`5*sQ#mpYCBzL4Cu-J{H&XttZKo zr?OHH5mc${4VAvZkNaSeQ1#0W126u6yu5yOaB_FCE1U!oKv_oa`xFsKr5Cmfx6>kF zCEmDKG-ILR(iru0fsew=vII5WbSSNqmRGF6&m^=8>VQqKAr{T!MHiX&f{=tKsAMB4 zI+vJPth^;Le;}~0g{RQ9d1o4j3T+gxVz;8w%t3e0eu&>@p{(s6rz$l$0SwJHi-Ny5 zr6bkS+;D!R4L};gm3Bm*Fs_kFx)n>|mr;ix0G9Kp(;;q7CjcktTY<*dECLp5u~CLMg*!he2vg@|JIbXXKw%5x)g z8p!^)LUe*FrXYrS^$q(No55L`iOfAo5LSP;?=0g$Con|4zMt7pr4Fb>uwEul)nITg z8Ak}8Z>;DQ-blu6m|(Wl`hGs?IqvD&koOuJJ)gxDc~LjnY3bB1kDt=ipuq>2>#J3b 
z6;kvt$AShQa^aw1i>u8762swJ_uuFjOh~E~BaDV9%3tOp>pw^zf*M;$%jcLXJ?IcVR3;IFRS@eMy|yYie{I_Z6?C^iB32uW4=JKB%%=B|3GT z@wGfg)=Es|D6X8C#q#UOx+><|+F!B?mUC4c`|=AV-}i9mc%f$M{vOA6bM0JSyT zYfQdZSXvI;C_J+JrcXOt6PX?{ND!do1v?Uk{Z{m%>t&x09y_qMmt8={AOv0BTw-H; z6nu4us9c)I=iIpPvS|Qm`FfnM<31@B3J~4r{j%wng3t^fyjD)R)(522aMP%ob!4jK zB@Na*MJctCx*CLpTVXMZYwEfN()hq33ML(*pt9hnRzc&WcpV#_XVJnz3c{R76YB^s zhz|$>-%PaG1B2HdJO4P{j`<`q6T|g!`(MX?ipg)ikg_L=+OFw|EU#>)KkTs;4$yOy zd@0yu_ig8f$N^#=(w+&Ht3Ymp@!tHez4y;~ebtJcC6tWDakCyQMfXyhj-^@yh%6e% ze}9(EjAYT&Q0BhMt$|Mtc;c$bRzAkzX3_%weXWBmGbT$Jw=5>rlGy;$;z#|YJQNke z(}|!8%C1YuaFh4}OdVFX00)}ds*`x%nbU1s z*Jw%`4in8TOJKy&3l}_F$6tzl;^oJVvhCM@HG5RkUz-~!rg&@R55VF}@`02><(HvXB#igK&lqD#%{rTi7 zUu`O|A2j>k8EtZ@dm|7)TF0U3dOw5O!bC^#9ULh!{;08=W%$X-fmDLrGlCw{n&X8-2!^)-_U}9#X%je zZm9N8uhqcHsexmA-ojVE1=2I#Ch3r35MIWm$lenhS)PH=STX3Pi(K%uP35xL;*(cs zv4B&5d@P-&cF?1qM zqFSL@7LL0tH+Hs61UB*Zf6n>YEZ9Nf9{wu}id73jWc%wB|8;?Cl~A`{oLf^X`S~1v zzH;iKsp>LCBMJx+3!`18%GW>s{bttig_Yp)CaqX8NBDB~{@CU)!vD8Xf0So9+DIBp5SfyCj zL=Iv1EHasCh(j-@%w3>83gm+9wfCyOmr2h{Tj!92;A6+-4x~kBPf90oWl<`$_@;{_;6<$``J-VDjt5hHgXb3OyCl(_=x+l#~oo!$lUaUeOxBd8C=Otl-PTla+8 z3He;AsKVZ!tH>_uTUM33$QChd#|ek>tQI-ADqOWB&PvZjoRNcqh|qQcqa z70N8IeK#8zAj%HhAfof!_S9TBC-|6lO4c<4vJd?5+;;0TRCKolp?VH&FM(WNwMdKZFyo}g7O^W0AZz2Ss<(dhp(V-S4Pc?Rc07>`Gdg`P9iso~q+`n_Cx~(C z9*BG55bE)U*y$pZ+jeDQnZ5(Lj2|WGrg82f^cbTUfkfgd3s0S|okDaQN`&bq&`xIKJkSmR63jAL@hyP_2&2(7t7t1pr zfws2mSkKnDT;-`Q^yrDH>J9oGw6?}=HuubS;)AGk{ zJ3jjCOT)LM7@3Wfc-S66TJ822!VT#!{a3jenKYV_FQMo^eAdW3V=mrI)k84GIe!my zhEi26?Su9Q%b*2XGaoA<2?p!gL8l&8fdnRqsc z&9osJ(Lm)QIsM;U){XU|stY$vy5(N+v>Vz1*LQ}8!YsFw5UBtQs{x7?gRB#Ek+l*p z3(lEcolfM23#uc{gr28PI$+s;=>UUssnWnIW8&l=K9xXQWmiCKFFvwYEvW^#Hr zqgb(MKfMP%)$~zll^{Ed6)8JqL_mPHD&5?s{YTa+hn=NTtj7FZjAySg-r}_Ihmg@{ zq#w9@I>yX?^;8`og82Sc^M>Ek@#k=yR-L(+(2SK6BZJ5I5>z(LS=w=qdpsFlZIm!+Le0?NR z&N`0X!OmN2p>16qfM2mfl|u8Qr$z^#=|U?NT&B-dLK#3X3G^NJf*K=VtJ^rBy|)+ zRNP-W{H`@s9b;+SL5k#7v3^YlT{Tg(u{k< zyOXfF#uPYqh228y*4rcZq}m=0TqG2ni1`vBR1b6%`{s?{FcyxCi-sl 
zt6ay{2~F1#eGqG&*kSu@%Tp%y1YQQOJ$$?$#F&q7kOdZ!TViIY(20DoW`qr3@m0rJ ztA#WK@v#nb-@$K#hUWIzHiK>ZXzgA5yvf=NF5v}vkw=WxhtP#~?^{ZgumSOy<2mk5 zZJjX$#68H!8|3OT4m-(QL!RcBM@L?v?EnBNpnIsvDSP;^uomsg_zRQU2Fd&Kv~K|c zEXhtUvkV?Y5s{9GCO2ONfmjv-q6YnWy6Ui8&s%}noYPGdEMR$2YRSt^OTpK|B#Mu2 zT6UHGn+g=YaMhtn4Ej4veG7+_;CTNPq!piJOvu=r)U#fej^0a+F2O%|objZp>nNjX zv|x|ie*dh_$ph)D%WB`G>j$hhL_NWpSq~_ywuPs+AOOd*+W%rU&37|~<6`W5u^3BS zTcOm7iX!s=hO2-SJHW^tT65(g11cq`y+0BhjVK+zxx&4lY@LB&0;F7kJ0IgJ2+7r4 zZ}4Ad!+loIu{6-4gZ`*&a{QTEE5CwK865e#~cV^tpn=tWl-R3#8 zREW5c8EVbpq;$PwlqF5mF5I^5-qW_tY1_6nZQGi*ZBE;^ZQGo--Tn3b;Joj*);aq} z?TTw>WMoBLm22&)%m{94@$&wMJ-(H9);8t`^WV|HB|!Q|pW_EF@h@-*v{i;?J=m>V zI4IBY6!q(;22LsGneYYT5X`ZSrI4tG84#Dy=)9>_3GBQTp(sB1n9pG6aCoA1!tJO{ zn|!RQYzQ(%20>lPuJg)Xa^xnaitM7fhv=WLnNOvU!MEEIzxFs67+Lps^#gCl#yz)L zMB%O>jLxb?Mo;FsbNjl&NYYfaKIbV3Y)i~Vu|;rrp(&n;f(-RrSXO25HEOi|yQo&P z_*P8K#eL>+=ko&qjLK)RCD#~9j9}d)M71r=y-6q4p8@rN)FyKQ#i5VE#Y2VGuqt&P zCGh1Le2KjWNd-6!foEo{V-rC4uIMP{UL%u;OP!Ew-~HA2H_Ls)33!q=560|H3!wo` zsIawIdF@Q3Ad7C#=uDn63Cv`xqpcbk0=+tFsuPN1*_*}=p7UKP))Gb?QQDfW)0jX^ zEMddjXHp(7&^O^(pwK^nDA6?ULNdCxSAG~-{pogZZ0U*D9qID=>O~PRoXtoX7kl<> zkP^2%2-J*dMgAfrJ$JL&PjR_~=$`M9dtsguuQIsMk~n{kcQXS6I}~um+bHTNxsM;X z_|e8T%k1F+AD@(u6#W+;H6pflB+2U;h}FR5!)2uwN_D{H?GL|BgbWuoS)!K>ZG zZK-NBm)MsEF(`rOSTz>{MsRRS{psVJ9F6dhwjtsOaBD8RifQyGACIfo75NckDJk7w z(he!akX)9BKIhy_(=?xC>4JBkvC|*naax<}jvFh+GdZdK?Rs*TV^lE|SCp(AK1@i{ zI3zv`(K^|qeh(;;qOD%9ePrto&eEu^x=nrl+ zl(^d62*wMc!cLlhAreJqV$*rB#$x8vI_|VQM^RLYDOunmA zo?64iTVN%WyJqE@dTzc9SqHpQt=dnZp+n@5Dg|A3f{(jsGx_MAhExD52E$t{bsCL^ z6h7IU+KmdniR!UdWS+qihQ`6g3&-21S&acL6bcLCQ%FuWetJ}?2DLfi=v<=kXG__I zKjU`6u0oFCr>xlSrs2tvMZ0}fm)OYX15Ze7)_Z3GhON)Rj3GxBFeh&#-pboCozUQ;4MeO@8n?md#4j zt;}x;pcU9$b-XpS4eD}NhRp1CF6ECHRp=wPFMKBR=_n7sviN(8iZbfsUb1I9jGu$y zYi$Z-5_nhWn9$@Z5D{2zX3h(VN5Johild8>s)G;irY;RSlcCjrtk#~Wy3S1?K0(0A z`xkl=ao_O3+c;ol_n`n}QzD#0q9KTh6^VWp{Km{Wm?OD~3uyD;XU`BInZzj;9P4D3 z(whnrc(9?L;QPB1*F+`FWl$_q?BB;}S{~T2$}2ZGD^dKFy_SwjsZ#7T1h3|F?1L~T 
zYxq#2Gf6YT%P2!WzysCXOx)Tb+dQfiP9LTw%78Hij@#M!Cz0a2f_m8VU(Y;j!~Joe zE+(<5yv}%`<*sx1M@rh-3*Dt3#V8M=YLAo!Z$;f&{L3CcBm52JoJa!Ea-%Yk9IwV# zIF8CVU{ckNnetfkb}Mkng3@LQ8TDYmVn31$l0Rsb#*&1;R|E{KC~=?t~>A-jr=y$GLQ`Z7m@Np%?~)4`^A*SFYnn4c)&DUAD0*wc{m0y z(Z^Dwkl~9qjXvWz5?sSr0|aMFpmb16QrqRUk`~)%lW4s7FYVR-l;wWJ{Y?`z&Cq24 z4t3EpcG`IJSQ%azz4+e)Na7B$ei6T41o7pKLMoDmCesL2+@5XENP8>l6S4Rimjmr_ z4z#73h5}Y~djd(@f0=*3A-JS)I$I#y_oV#&DgyB{rlu^qA;6wkDdwUnxuSHIuDQP}lr0dVx z%MD?O^NER;=R%^}ozo-vo=t4Yl)vrHNA%qckWzNIoZmCsJ-XZYx(kycdZpwV?xLOJ zyhf(o`VPg)1#_8?O|Z=8;QCFLY>EeDAyH`l`T~{Pp&y(glLi=CB1VU|k};?86(JuON;{zKb!{KF zCM(!~5#3?_UH##_tQ`&yUMaz6SuOxHXR<)oxxjSChL+5OrjD9n>0hrOZ^W=Iecx1E zVx;RjrtuymSG!j1%M0Ac^1V)|?KpzfpSp!-ymP^}vOM2XnR#BC>I_n!F^C|SCOWjw zZJU+;cpDvFVr>B8y^(kya_R2m5q_N0UXGzD8G7_Bqe}Y@6k#_v4;6Ti?dt7^=y@Y; zA#Zs^_KK5*BWGcVZmHFP$dauNceSwgL+6s1(4)DN zK+hUxNGQwtBUL($cWcG-AgzOtio^IPEMTv>T|M9=%NiN1(YES*Cd8{{>;vAvdNs1hdZQZU z@cVr0Dy!ub@TnzuFydwK%%B?L9Cxh=Z17$7{{K#PSH{13P$=t zT?38YFvCfE%jp9x+o1+<{Jl);X}umcPTn9={ghrmxpm1wxEt02uRO%*8lZ6Sp?#bA zBXP8Z&e1b|S2hxk>g#jZsk0N$pGq=WT=L&rI%kPt%G89lj1EI=NuaWzGhl1(H876Z z1w}Ag5=FND?V6R@W6B)gBBGXN{5p@ievwP=U&Nj{xy+{*8gvNdV(}`u=<9rPRb>!x zHc&e*Mm#Zqa{?Nb;sNAmBr%BoKCMJF^>P!Jut!qQsT4HH{bh7S&~Yuw;>9rbzK7>@ zk;o-`0f);yPvSl#$MDFPRY{Qbl0ftYnGH{FIaN!U}?-Th}N}w_B z#(BZBvi1=~?Jf9RPFzStl73g8D4hk4Tj`0otWq9YcK8*A$?8*mk@?||k>WX}f%|NT zonc`eTD>XKJG0f1)6wVz*|p@1UlL@`Al~GD*8R)a^86b$!M_#<8>~t3(hSiJqGMw* zf~+}3+=smT=u)JKx;5Ou0p#TGis#uA-l{=0-qSGQo z;_BPddxcEXkznt9<`Nmd-VN+_pu-NNzOJ_DS54`(DGIGZwELp1q^@T{jW@oS^hoKs zk_{Frk01yE#pyx04BjtccCD#gexU47G`WdvgKG5Hw|*_r!4vCx6VDk5cBO_*ya>p_ z9z%$C=R4i%7}U++0S{ZK-o1G@QiG#DqQkMrir3pY*XaR|{N}1fQvij`qG9J#uv49q zGHbmFcaggWW?>EpSHji^p;QDs+AgeN@A zkrE*~IvqJ(f@}<7-2&Y!e@)GMMtVH9YKK#&W6W!b3pEIN(H#wpCEmW%?kiqU_eJ%g zCQm=vj*43j{Y+X`@CqICdjB}sgH0oQvTqy@KRBKEWyvCkG%PdYX-Y^3Uw*;tv#5&; z+No{nM8}|V^X>;|Rk-svsA`!sIehk7_q;lG^7a5iO+=<1y*sVGYVP^`)QhCWlPARg z#Hx!B^hNxP%E3d~Yge~%-$|jZ@M1T3nBpoBJv=c+o#KL^2su%>7UEDOq{CYA%g(BS 
z&j#SU+}KH-eLfLD%}8&YdRD^2OCK%Bvizp7qBHFEGWtk#Q{|=bP7b|S!+d6|(YWKq z%@st)n0NIqj~R_n8lte!B`}cM$eaCuye(VBJeJ$NjwJu0aw)?S;S?Pk*s^FR;URHQ z3~JakLWZn{$$?GoW8>^j4XBV>b_(L-QW|KqkXvVV2ptLO?6tM2Uxu5Ho{#j;9cWo# z&er*K&y$B&Y|^L4U~&8r(ON=s2Swpae|Aq}fM=S_-DwLVKLk+P4Fv6NsXj*;!hSPy zxDB`nNb4q8g322URA=^53e&TH`sw&-lbnc(b34M|%C;nH?BU^M6X320e~+mxu1H=`tQI zTb7u~aVMXhQbX7?eCc^s%x)rTGm%JBAea=U3d24bu=(t`j4fWK=DV)XE`yy9a>TvZ zq;%$~1(tu*SZMG%Ev<0x&aDF!3t3^TOUnGXo_r=gWXdY3=OL~xPah>(=-}AUK-DT% zVl{t?1i3$?EVE<=)_ni*l)jvLfl+&e35~D{OD}chELjneqhHplv3a$Ive;sR13HW4 z0hNfC1O~FpNhOgg122b@$Fm;0I{c-@mJfOpbMdMyoSZZlnsrhR+EL$D;;a1l8(H2F zCBS~mO!}!r=M)&yNbGx$x=hg-HD*dd4lV^^%$iP|#qYm7`hV-}V4U?tBUM4XJ3M{g zU*J@JWTGzSNCW71nAVE^sDiHz7n$GeJ5G0Ma$>{jZ-5ab%++?8*_D;ae&iT=%!JxrE^WD{VM5O8;i@eyQnz0%3Cz2Z7nU#TncVS#2|K3vV z@&;MGNSSgAwX^h3V+K1GmTc?9T09t3wteE=`!j7;`1dE@Z49o&9{e1?61`|pdTZw7 z3lf?$%DYpV+o;*C96%s1?wRL+X&8ve_6|yADD5rx0Es0g`LV%{*1VrZPwgQq8X{Um zbAtW_{}qC`#{Lax0I$AL0&|Y}#Yj`5YR{>RBd({)zcJ`+I&OG%aH~;~g&W)+fhTr6 zQ9y*F-b?o$Y~-x?tpzD{1hcd5=}uw3`R=i zry^pR1fZzh)sW>!B*vJI?v7U>+0p@CRhzVJ*weJ@#Jw(+&ZrJo7Yb5y}RG&(D}aF`j>x zj-1ru?H8FQ9hgL4lLo{!lPuc$ze)*$BwMJCRnKLQsz`Q%jcLPN?usl)Sb9GwbaaS< z=n#JB@i@P4!Pk@RsVz_s_bwQU5$Vs`v65HapYz0LwJ0vNrr`_2FjA-&WH2N$rpl ztPi~inSNI317=Zq3A>H{FD8PWyiW z+yR3v$PLzA3rkB`E3lVY2UAb&4H;;g$TqEoE~M^}ixd^&#e$F`oL+ zs+r!%FakW^i?Pu;P<=VK?xI$jJ#|~+TyF$@Q6b1a)`RM2ktY+Ds{7LVmx1-Fu04Up zw$^);U3q=Uc0o7fXAQ*Iu-d%ZF{1@0gKiDK3kuPp%8IRkx1dfm#32*Y_BG7gU(!gs zk@L%Fp%@_pJ;bS6-Q7RA^nw+=+ER|TUU3+Io|y$8=Kft_M){%3-Hw=I({0Ev!b09J z0{zN{iVq*QE)@64$CNrZ`sZpFLG6(E!W#H3dsK~XY5WH4qmUQx3ww-34`y7N8@NY)whA# zd{%sS=hvdvYwxD>6NJ5S$`1x`zPA(P@bI*lQXzqhoY=jKbhcEIb+ZTD>o4@pjK*$< zvL@^cBZw;$lAI5#r61aIdBs`gb|yDq5R6wR)?~+q49Odf_th$TVDt0I>rC^3_*#`x zlx}xixgP}h82ENwX^~{VpOHg#W@_6HzJ}DQMn6Oz)@yVck?L!?KC7Wi1dtOnhcNZ+ zkX1keomf5f{?c28Pq&BfnVvdJpQ->kAHM0zD@e74oPnP!qD`An8!_7u7b^B8>e0>u z|I}|u6bYDt&DG0)A@qD!S+$H%^sCep?**UjV)UY)9uu85@I<<)lH(TH zRa>|%^ad3PP|N9i6o{YDvWvdM5bw`{)EYGPMSj5M`Q8r@sfklsYsKZcsKm8S`LNq~ 
z3%@pacifG1^4ng@J(L>yQ%BQxd9-ZtSI4ED;q}C36}Cfp@TMxLndkA7p)sw^o{D$3g8Xj)Dovg>zWlVtnrZy;StfTEunn>y zI=82@Jc;j?_~m3hueJl!B!Ss;C_9IJ3BkP3ws-XU8YHG9;J{QqIH{6ZbWnw3Xc44r zNUDu=lqV?DFs^oIo7JcGGfvxv_g&{5vQeOW=yvC4!W-L(jna}5UmqJ^f0lE=MYYL_ zfl2AFh!AD-XjCcEk>~B5r8!&z*mMcF3N=orMTt)%M8&5~-(pF~bVMY%ay!S9#NH68 zl4w>b>Tft@uc6}(T)7>K+f+7U4m6ZN!FEfm_p@`mB-c_8R0Dr|j5rS!+P2?uj8bdQ z0P%O8>OoOki8BeLF5dA2W5c?t3z#*2Bs0hC{M|f*=yOVkJ%8>WgzrY*F`?2w(4j0{ z7m)c2QcPe=IWd>w`H6qi-t1G^?t9qQ^9=CSRpj>i;O$0;3ZR1G%b)8dQRwKe3?0*z zgmMM*wnb;@iW1z$sNA~Ald=b+6*cNBV9@eS1u@`^#Pl*k`{!XPwO9Fm^$@5k`=&s0 z5Fv#?L4iDc{y|!l+0hQ&mk(fKmc5&m5gTv zGy>_B^1|K5yByyBEF7B5q|jA&*ssGos=v&K_wQa|0{Si2MP(bbKpAZrst86ljbF4= zf~58@GMNHDF=~Fzao`j~3`Fw@qYK{W(Z@=;XE4{T53}$Qo6nxZ2L~*u%5l@Rl$OQ| zHB>2Pms7zKT%KDI&J}P-LvG>Qo-q_$7j|O?{mri9)uqETO7OMV`vn_4o3apwnuyo4 z{?!A1WAg+3g8x_w`C}J2-8lQ29DyHDYYCHwk4*2cwCwYNF5cT>y z07t2tlxculO({gU!^+eW_}%DG-f_8y=SBMU!h+Yo=eG3Z3SFW3u7b``=<~kjVOH=? zPOfW9?A6OQG=baUq)IWajan1R!S4a$9oe>}iSen-`He5VvKNH7T1C`Z^sPXf(e7$e zAFU9mxwjNbB&s#dmk!~>RJR!-2?}+Z!xE`6+p{&wCjsQUKG)ujx@ejDa=|j>9mBLv{LeK;> zG=cS3Kbi~x|C!*+Cd9&}CF%55S{`d3l$*R?SP~?41bqPX1XAIqztoSu%P~iU`sZvI zYW{Q380l^iS+()R=n{S82DCeyXKsZ4fK{6o73CxFkr|SmcBRjD(t)%G$f-_Z?EdOB z53>Lf@>WrIYqqB32zwNYn)A_{F#(c*fk2)I#(<*OR}=$Wn?tAy6>n(^NXBe-7T=Cj%s_&fvj8oiA0ypQ z=+CAMW4xeXCl*uQ(wWV8fe0+X_6EWjyWnB>S9LQqC{t}Jzj6sAj;cYS+qg2?9?s|7|*RP@&{(l!=jjR3c2^c#e_l5cr*viIuk6gU6E zM{_=`DVYj!)*Tsp7tzY~yOkmm<)R5#spnFqOZI{j>%&=2PStFRApf#Q?JGig|?g(kw0>XR;^kj&KP|ZqvR2#=Z1%DC(@ENtriYy5l zu*d*mEgBRJrOx3DBD~0e{9W$G#n zid?3)V3j@{vF9EU5MKqorBVO9TA}CT%JT&_2fEmmn%O2RaK?I~+MAtu z<&|uNo?88dZt{v!c_-91_nnpd??lPD6n}BoZ?^Zk=y0kEkSg1H)-+u%{KW)6MV;>7JnRIgZ*DI|%<#H?qqN3!(ZwQoY6P8^%>!xQ zXxXsJ-?(i#lW8+DshrId>*ZYT{u-0XRd(pirgXp{(-%?g58%V(S#MYgF7^3(;mO}{ zGSJ5;?$nda)QXKX^G8aKwokBo4`>P}DALcL^Z;xoa2Kv684ykLvOuk6i;Of0*P&dO z3%JyxO5S}`4qek$Ts4I-sR%mrm8ur2SXmkel9_9}1)WOZgu`Gft3lj{Q#}Rx2YcT$ z%wY+HKF*VR1>?x4@?(d%+bGlP4o5Fh1ru>(;7k@`Kk_eO z7*`XlrC{SWc~_APcI~uZzN8?Y#2x?O25^4-`QVr;x_vMqdDmulF0$H+g40W&1Mko0 
zcG5DWXf&Zaf7d7grgxr%6`q2E7=%~lv2YI4>kqb(wL zT+RJFvkpto$YR@DP36B}WJM0`$)%OKheaN7@!;(rI@QoH4Jw5b4S9j_RDpa0r+xc_ zSD8v7k_a~VnT?QrWPXjK1+T;slXG;WgSy(lG(&4ey&FV*5IcR_rau9wDI>ge)1!+3 zXkVF+%Mzdrla5OsxS2Rh!M~sEyg}B%vP~k^fB59*IJg~0!j7#a;x8XaTi~?1wb;$hAvJ24P8c6)1XWckSGaajU>cP0@FHxUOBk@a~HA z*Q^xH5|+R0Xt_4&5nRsULcYoQ&}X0bdzV*$`~}f%6J`Tr>p^+-j|1nd>u%d8C{HB< z<`Ha!+X}4KK*zCgqaldWav?$sp;&`wxN2(eT&JT^4>hf+j&0Sd0fZV~VBEo^=Ckr- zRs@TeVsJxy4g~3PdDW(YG_ni4+zM;2!R0`v*4in4L08{M0f-o7l+apuuwG|U?AQt- za8CpRQZT<;@dv{s82Q_xfUR-ra# z>IeqiMIJbXs2?-9w;h4Se?HrJU6Nl-To>s`YsbkOs!6Bmo0v$mqO7fvf8R#zLtZRX0VnvR+m`cJy;~Ia zYT}I&C-&HYzg({e+{!!UId>Z0lq+)24TnkO?dh-12WZdmTjfvG$N08r+J>^wQFK+V zKfl<%=9~wF@Q`RsIT1*-|Ez!y85cVCm-c=_q=fXg%~$eN4|@yue3_g61|=35|`MSCa% zcbjgNZ5rk+7#_H{V#9(3u<6~C!AR#WushwliVPgJ3MS{XAKvB@KWMZ=ZAjx`C;f2g z>0CNiloutA5IbtJy|ZK*dI#yo(k^whVE>8#dO@LMq13IazZHKf>f1t+_l8%q?C(#z z$C;T`!EtuLP2*|Wl$|nj`#MDqk6wP>VHNx^(VA9VOfN*n^U&ohVp%Y4W2L5T8M@r8S9#rXa4ij zz?v<+v*5Rlwk1I<{USI+L{Il`ITT(U2hYS%EM7lQ%}bTbE;0xpb%Gl$S}ZuM)2<;O zLBnTwRge&*iPY<;3xPU^EjT$M1FH`KgdWe1a9SgD^_B%)4-OZTFhq<~o4t5=@rIag zN-uEshdn{>&C0CV){*+KAv78Kss{gkV<4mQ8l(Xz>aCV2@@AP}egG1z!hZI1=e zqm%$^YZ_ri>BLG+72?txms!|xD8%9LjUa!a?FTTocG9Y?L9rM;aG(u(jvKA)@3K4_ zoL{CJ%Fa`X2yUqFnvO5NS`r+CrD==)frno4C}r!H(?w;HKX?jAoY3NkCmfuZo-*mc zeB>IVKjmF?Y_^-9dRWQYSwAOvrFu2A0OHOci2jAM&MATRY~pr@FzNxMvx&Wytho4) zng;F0pY0h*HRmn2Zt|f=w4L_Pc4$luMmW7D@k16izrK9!h_ei|C+LK;Tq7+sL|9mT z5VVq6&sR=J*G@0dNzj@T7fZug>SH)Vqv;_y``YzC`6Pm=od&2s9;5eYCWLFR5H*}3 zBc-0$3ZLK|xy34(!6Vrl5=y_6m$}&l_eD2=`n6_t@yOl{8StBRI~s63VJkFAa+tWw zu-$yK&rzFthPRU3X|jMA?AWF-bNQI7hG)xR-!w{xQB)H@<|FfE=Go~iZIEr5O?tOi z(b};d-7NxJAQKtuNj?qvO(B9o)JJve_m=B<#ec)rwpQpnHMiW%wvvT*Y%M;VlvJaT z>xlrzoRlE}{h0D_FoQz-1On@OScc{>l3i-X(BR)$AnTg4n3tU$uw?61pL+h~7=3(? 
zG>yeL`Jh<>YC@wwnN zNG%*FAyx#By~19;B`mt~3R;&k)iJIE*Y!47n%;S!nv8(Dh1X(54tiJ%GJLaJI;FhP zF&JlV{hWWW_kKie3a}i&tXYvhTWAz3!A!B?WnY&(g!n z4K40?M2q877inn;*{eaIc9L=Ly%OB6tFXEEE7AD!Pla3VG?Ize?emV0Zt@K79c0Bq zte(~IT4-z^&&!>QCCk$k6ws>=@ zh{ri6El1%I@&wg8?;fjuvKr;P>y}3Pz;v@krxi|hbFlf%U;W3?_^;x${-bkN76Owv zM+}mKMyAFd2DkNYRtNOI8ivR?z=-Hb36fpovd_@YQ3UT|3KF`UYkL}VD!6}PmqKNp zM&8|`cC{W z=m5yauMfboaDDalL29i$YGpraC_rWl7Y0@g^06=K`H>u+LREbl=N_omdTFEDq?TPJ z)3`3Fl%`|66%FtB@>JSbvl}S*!j$h+bH+1JF@-3T<(EHRQ2gr*H}4~FlJPxa_vD*5 zzFaJ&olTw+@nvCfrGH+|xbB2}8X6M~;_wT;wPiU9o+thi+A6~$ng~^N zU5Rs1q%C3RFcr9Ti0esc$38nZ$@;8fl-WFScUJ)w!@#)@hJ~RY{JH&gemwLobM1I8 zHwJu&j*wbFkCfbn#&aV4Jf2{&W4R-3RYyV{YVKlkOgNQB7$NFPM%MBM`E0#FO7=pI zH8z}c2viiOUNq!STcAfKhHK`XuiCSTtX1%~1aUnr>F_8#44SvskV%9~kc~xGap6)? z!7)%tMwcm7=VIqV)U@(G8BK>!rnCNpNPt&E$1knV=LkZ_Lq`gbE;EGe6kmd(rZu)z ztsh~~9c&l_nUL0X`;0gl#GZYRIpO3GJ_Vc@?S5K9XYIFU-&M9ivb>W2!Ef*CyZPb5M}hHq=BE`_OkRX%)NA-FMJxIz>njv z^1T3Z=u9byEZsqPs<16t4{#vuKT z( zy184({1N_%Wv4S*tznyN`(2p2)+ywx(V_CQ2lXRfiki0Av+2RtZX^m*+aMvG^dz0p zJ4W^OK{u&hsVTyTvE&fJL+jltKVtdfK?zZ%BFCLeMS$X7{b@7UOm*S#;!VNj`ugw4 zn#C8o77vOmp85ih>yTO2DW>F`)PsrKhUNLl!(>df4{nF&rhkt|yQ{@xGl9oDXV=jS zpTsAGWB2rDC$Pa=jJhf8VYW+-X)O<__ovc_E4}XvRu#I055Dh?;&y;4 zqpNb3I0siCJUaK(UnF@|FW2&usOU}1IBqFJDS|UOV;Q>z{O$ZAm(Px^K-aqWv+XOUbJ}YoDZ}yIb+@20&CCTdyCuc$ zL0;`)nOe5|Q4Yf=w`F1Sy%gNG=><4U=oR$z~i`M}E}yj9xyK+!P~EL+|$Bfb21U z{vxCAaoOYWvgYk}TBWE+NMo-2bGTu%R%6yi+66VS>+P8UWqqgZAaryU@Tbz1ot4B+ zSZ!=wq6?Po5hK>(%UNm9)4ex(37=F|s5Qoe^uO+! z8&uW;D4o0W#k4%96&biAl_+IOj*quD@ORbt$^aV$&f7V%Xa;Do{Qf#y5`c=52D)2VTFg8r! zUgGD*w#|M5*&)ZWW2pc^=NX_NjB=KSiIBduhKa0d193~YJi(aQ7&|&Sm>5|9b7pJ! 
z3x=5kzySEy2{RMZ{{cDw1G2F(uyXzvWnuUa%F4pbo!V056B3h7c_7*`S&jZ|9nKK)rHIr9GpxXXa#MpjQ_1AY~pC-U}5KE>j1;@ zPrr%)dJ!8VTVo3wGr<4!|9>EQVGC1J69*F;BNIn}7J!)P@qLfQR>j8R zTh|1Jh3Vg2iCQ=~I{jnxUoE8#{+FO;VeDl7PamADZ2!{#@8$hV_aE>4V_w?C#>~ka zz{K=zNYujWd+gueRt8Qc!Y2P1`Zf#mAK$tG>%{ij^aJ2Ru6TlDHuH#1v46;9Q61K} zXf8nlc7(-Y0ik%ekIXa|g+E?;zTTMmlaDVeVfg~zbIm;`7+!avQd;6guD*n!oHZ$LoY~$ZQJ1Yl1xcea{l9V}Xr6K-TGYNdI`}|Iea-h3mh5 z`X3tnPeA`2U|9p}Z-V?k2w-Jk<_KW^`F|#uH4lKQZqmjus;TV}1zr*s6!G9!LmTz9u3mMpnn^>5cJHfCqa(+|N z$;4XqyCD;RUj5%VfP>+?**~~Afc0Bl#=z}gQ~)q?uzhp(AHIGE+62JD{4WZ92lzjC z&A)*EUhlto!3bbs`8U!2Ei(aFSpT=o3}9jV-!g!WiGlNbDE}8G!TfI~6}ExPYwO&y zMYof1^g_5|ZT$FYx4gsG(%vow5Vv!6b$$A2=Nh2pw{yUM)!r@R^0e-|NY1L#d~`bh z=k1iw`*p+fOfjCSi~%NN1>1k%!YT_}Bh{}12wuTaNX#sjxZrnIBeY*|SW*&rSiIt5 z-zv^MVW4>8Uw{}nEVtIy_OJYCfRI_uL0}%Uy4KYx*SQuLOCfS`&WlGoF_GmC-2DZj?&jtPBt-6)N82;Fv^6-g z);Ef03}RRB*Z_j%4kS#InU&d_Z4IQ@G$j3HDzvhObKT5I?ngWVeQ*N9^x_)KFKZrY z(R=R41GhsBdwoSczOAO}S5p~AJOY|K1t!nZ(CGNVj`(Ha7aSFbj7M_KPqYvIyOlf8 z>Pr%{XX<{N<~{F=zZCL4$%91F4{QYE0BWjmR8QcuQE<;C+BCOY5elrXwg%LEA}i~{ zJuOY9SE$$*o{=++X*D1-&7zl@%+!+Tmzfc0owJLeAuxzEO=K3ZD1aGOi{z+Vy3}JO z5Qqi{HTIOhCNr)f)Y961Oj}1DJXB8wRX!yZwdmIs z>D_wa@84P6JrQw%QLsHDQ$z6j28YJrdg?1cd|x*5vom|XQ2y?p39*TlrT9BJ12Wp@ zUlLmngshOcFYPQAz2mRd8wVGew2=Zp>HOulzRKKwU#j##_yE{5!a%N{2)T^UeYf2j zv|nHKU(J$-dS!2V7LsO zXfp?BHw4JSF{Xx&fRuzR$!GWvel?K+#4KU8V)L^S2KBrVdqzMGUSb$k9AS;9wPF)c zfi?y{nIP*}Ut-G8>sgzaYin3RYaHxA;PCO`d1L){CP&A5fv*jL)%`kKK!1K0ESivR za+%V{1@go+BLs4rCjMw-UReQ}=<^Nx^j`sl^!;%nt348k5^F-FxF({8l^H|Fp*MoSNlX*V*i+X~zUORw0wEjDIq+>0%k&CJ9R z!Q)Xbe`0G7QN%Wkw!UPtFA@j8>i0qWj2|i!CW9Xe?}#v?V#mE{r}NGX3$J<8b^s*% z*AsoghW^%Lw^(oTIXu2V(-J`Gcw6xLy*t>K@VUE4pMB?n34$=FTV$vHjy^4TGpDfL zFTvRMV((rn6qm;CIj{%&HG$s7j_cDrz}7Z3>n9t=SJzmD@2Q{m7in09KCbKz^JC80 z2)yYHeP+*j)+D}w?`n z$6;NDyn2(Jd-0!G`@n1>-}E@GtG!7^t96#^y-n6@)%Zy9kRhiv(J_`*Zo=71hI2ac z-CZt9ONSrp#qGB_wIn^byWl;EZuej&Xh`|^7kpilVHlv{+}{w$ZF0lI+lP0uGH78rdIDF$hUVXWjPNuCb_JN#ozU2Z`N481nwO~Qo=DD 
zXT&aq1?@vO^xG)E%REQWex8H^5uFN#Ku15bSB%6P9=+L(1UgJm_rEJz#~yWy>tonc zZ?Q}U)8rQ^_=>rPBAV?zSw0`ct&yWDs>rFHT5a3Eu9&Ua#Y5O`5cT6X>?%jnc7EW= zp)7o%F)T|Dt1BdXYqaxX;vTyNgm}H?vk(07xk2bv5P)mFS#wr0N*8Bl+sB)<-ApHc z3m99kBD$f4h`VX0f#T>Z;b7#e{!@+7Eg4pxbFveAZH^RiGgTBj=jgFT;}*>~$B+26 zQ|_s&#rI0$L~p4Ff||-pIF%f@gzG)Q!8B1V0_{|=Bm^&=oEM@uKX3HvaI_+(yToag zDe@EGW)z?q4kSXw(=#YIn^rLRlwe(d)BlDnMd;S&GV`-gQMR8sTp51#Uf@E-NWUCw zBsbD(!|%sj)a~yZjPqMLUxc-+`6r6`B_wYOG*<8w zSP2+HL@8y`s-V*4^rFNS$UsW(l32}dM3k{{nV=~IMnY?i1Fm0L(kBaBNxb?dXC|hv zKL>p;wQ-J90>zqA{F6LLTe-lYID@ZE@iIA>l0$^4k*26S$|b1OVas$6ML zG>>#3c<(Bf!&wR_t;$4_hC=0WEm5Q@Z#!R1;>^W3CX>UhFh*ZBB%)%eO`_jKKj5Of zTa;s{Q`l3A7ozy3I<1y}UW0wC4)@~T>t8uPLi80zD>{7Vnq~XVWCiv^1!r=HEO4l5z($lXMpW?JZ{ zkG|=nNx#c}+CdbiyFHGnsoTpLD&geNmmmNAkv|1pr6PRkIJ!IACo*HGOtzG!Ymyk@ zD$pgZ*q5wByA>+L_e=70F9NtR*Cux(@BPflj_z)z#CEe9x01>YXm8j9-uTbZ2lAi# zEmLHBr&aWZq$XJpREebkel39c-X;0_VBsH&aiSfd&BP_P*;GB%9KP*{V#M}P9H^y< zXBF_F=<6i-Fg&C`#tv$VY9B=Io?mo~HwrfHS1IXQc6$IPOnIs4jVLp{VW(<$;bSP# z)hR1>P858F#GOaB6+IcfVs!TT=WBG|@6tGl6s=s} zK%L34gR(~oGPNzq0b>fY(^N)nu- zTT1$T$)QX6l>Z+9JwU?0&*%8HNt(4d26-=+0+y?_%XbP~uTxQtM3*MFzqV)RV7&UV zJlL9uSlqD!zf%nEYtIyYprg0PmnA$Srg4KGr~RQ($SS8$Tr62kGTL_VooEIFg5#3p zhq@oHOc;|QaRBwvr)ZZ=3@w$xXd+lq((gCt!OQW6&PwM{!Y@{o+n)K(rEea?QjpVf zqQ?D?=qJ$alk#akD`StQ$2oiuBVgg)_aa3jmkiv?OmlVAzd%3vQHq_o{!^!K8Na*{ zw{WlLd7=J7hj2GB$IT=Q-m+H3p2#@!ASMH&d3Q5mrkygtC=(^)WLT7L7_N}g2Ck{_ zFep1Sb-OhXaT#L^i9Eg4VZxq&^oYMhBx8BPpYH5&V;+qmlWVs7EOU7-x$od}&ig~H zjL_p1V;(d|QpQcf1_fDoAr0)arE*7~8>^6fmW}%wOjTNJ*qCsR`aH6-O+pYXhGa|=g^sZB)j57kB zDzc}WT=;xL=&dv5_O#~NjR$WggO@}i197I~=%5S>DJ)lit&Dwg?Dq64BCe)J6eQ># z6KceV30+)|6`?&m!>p?mCG;gHR?6B~=iwuHzNZ=`8+2D#aqljy0zTrcI@e?=5Hz_& zHeLjvj0l>rva#T_#_X;%i6MC}gS>S&*jQKv^hxu*m~0kU(#MPss}^V-O8_U|wgp%M zitQKAy^OdqERk6SZG-3P540C{Ri{pKyecjsdK6MXI-#@@vq~+m{ ze~eKHJj@h*`KElm{bC|5Yec=`@k2k4d~tg4eE&LZ-##1pmG!XdVuSL<+p?qLJ*T)W zSBx=F5g~6c(TQwHl~Tv45r?a4xQ#UQ$A$$#g(JD2L zrd?Jwd^@d&d6Htx;Qxpmg;fb(npHC&uZ2-YX?=@Nv_QvHGg8b|pWU6yz1@hjJ6h5h 
zEeF8Zk~c;sf-|4Y`LX-y2HhM(l~eAR5&|nr--1i^Ih&Pw#el2Wo963n)$#MxV+twf z4EAt6;Wr6qM|GfM3HAbnjn1!bIRC;!=~@38728ah&~vigvWlS7IN2qJ4|odjRT`LM zc9%BYaNMMODy(Ui5WcrX%G&p>26c?TdJ|{~mFW7|$=(wmQ41afxIa0Ssc@M|gt}rR zu3)I@$KW7wp|x?j+%QuQNUU)%^uARq4Ga_%BhELh&#gGX*|%~7x2WA$=k+J`miUY0^BNToac78XCJz;kN-!Dr3mk>)8_ zPkL$+(g4%y9GKIj!N8f3Py4fIDb3#WQ0(}XjMs*$MLKbXIBI=fvACaQL+q#$aAc;} zkKQ5IN*Cvb?z@QfQA*S@VG@QYjs4kk3(bIGbOGk@^w1#yifM&!VzankiSWDosH&pvcwH{?5n`ch@3Pty^FKyy?8NH8u z(Rr=V-I5V`nGGqg&ZJGrB!{y=J}(%6^M|{>#)1uqagm=NsIGd4!Fgu4hw*%a#1#MG zEBnaKbr3&;SKrGpA=eo6RxjBLgiC{ue;Dq-aHiuRF4YSc&R9I9b@worGXw^LYj){7 z4C6(&yIfzm9SI&hju_KDIn)oOi1mRG_`H^LgyRQ{L50ssq!VZ6Z)4eD=Q_0!FSMv@ zuWvWJbF~VZfj!S>;d6GSdf7n-L1J`1^OFG+N_Dl^PI6yvaB5VnEW^f^LIt@8d9$#+ zU73%B{k>a0Kg~|}L`A!*i8CHHA>X~eH*a^tZ+qPFIhTCJ%FxE7#Efjc-HE#tRwuAc zt;2QTt*%Q#MkG+jaS_{4cksD~i2)7p$t3ec(?@cd8mD1?zYbO-Egrcx64Pg41a*y5 znzd=kclWK+TNqFXdiWNI6y>er^!pEzp+7BG^WOl4OncUa&vlwbEaQE*lihQM>rV<@ zuxB`_iGIzFLCYo?(h|U`J_j}r3^O`6_;w%S{E!LF@>jh_NcV{?^W`8p5`v_yE5FO@FQU2ou_m}q%-ag~P8d&aH zo+We6L{;XSfS&2aGRzmQ9arg_ZZ7No&%;Dv^`&bSLPyW#4yZOgK?og?TBKzX=_S70 zkf{Ng=dk@vDW)|whkG`_d(G&I&*+!=a2s)+xS^4(Cnj@7+&j^M)W~e&&4VoG=G@a= zLt!Cd&~Kdw&Cjc7+O3N;rF7I)n5IlEWGAc@b zhKpNvA#knfv!^DvhG`xo*KdT$!{_1uuFD0L*Hq6sR|*kmtO`B!_N?eYLqljXXzlt! 
zz=Vx|ZDWV566C%0Wmp)&}G>w+%>gWqPYM*TMK-w+gCEZ^J5{W$Z z`J~)m#abQ$w{y&ohKNgCZ(fTTlR|q93;Z>6#QAWqHxG;X#E&prGwHX(TFbjf7|ipQ zaPrp{%v#WE5p3caW!3ubJ)#pHIZiH@Lgx^K<+ncLGhyj-kkrY!*ODv|jre!s)?H`y8hh z*{_C{z`T8MoxxLUTo}7pTb2m_tAx3vD3T{k@HgkA7?b#(iM?(9Cd)lw?Db1}Pvla$ z3Hbxn-MpdIt33txH}&t*dpvJ_8Oz^Pf+VMZTrX&})Zm?^O#qlnW3P| zpXh-zkf0P0DSoc+bw@&xk6M6TittpgxU<>+s4*JlqWy{?W!M*@8@AElD9wpZkn#nnn$FRV z7WHy5RtODVjH3K)A`YAqobB^%sz-ha<_1>mLrx`mFRwp47vk2Ohy$C6Dfe-QkOw3q z-T>RSW@|UOBk(ykK1yc*EJl_yO4IlGiL#^wB$rQl$htm(ecizBC^ zrFmU_d;A&yFjWv;NP^pN$_eE{P%~YQ(YBZnDmF5USpd3R62~Xr#(p9vB~BA zl@ogEupwv9@;HEmQdeUC42imFNnc)@VwMtS9oZ-4k$96 zlUJ0WvrQT@&Y@McBsh!4^lgS?Tb0{e(6pEPEbesigH^)BLw?4K*s~+YL=l4v2OvY$ zCLWUNT`>XdkMFdN;c9P$YtH3}-#uB`6pM;bLuw|CX57Ayy+ zd2aOV9X<XzXaN+TWbQoOT|LrCmek8?j_q6frKT!l!?grt^HH1)1)*T8$;-%a=(OSH z$guDF9DLmArDG#L>2w~{wz2!!hNSV~nB+Ae;e4UT>9!+~@XhifHo!@>M-KYhg@1fT z(fbgKQM#Xb6?Z9ScdzB1PAeyLPzfw5dJ1c&#~_pcF=(hG&>9TumS1)H8NQ=&gaq={%z8 z!C|PjRQ%utGi)`*oF|tFqPRr(QNcu4OP|xY{Q_gmx3S4Hc*Z) z1c89O+i>de*Uq~r7Df#70a!+XFZo|NFPkAw%wYDSX18*7KA=vk7LKWr)!cIe>g?1O zjZhc}dC0fUH>q8Dc#y`&?J{)i_tiFYmoCT-y5U1GH)GL19I?5m36|i4Mue=3)LI=* zWBz{HWdq-l8esCaz{22Z=*?7dVVN6TdfQ5s?`g_&b@ueQMzFv8M8{)p0NG?~hjDDs0$J$n7jsNYbS?+5FAeGGe0n?L28C(P5R?xy&j~Oxq{wlgt7R;l=5! zQDsOwMuw4CzTGM4SCsIIz}`8jaq)BcNF6#!Ll4SD;pw{;Od^~rJB3*q6g;5!hVg3&T3HbH0VJw!-dHg{eadzR2*f)Li`nhLvx`Ji%{J1ym zq^lZOp8zI<(5&wH*{&q`eI27-zJeHJtJ~a4JUe#LZG7$@$_xxIT-1tMYqejvgroBX zIp-3QkS5Q*k+h*TG(H!hHT8AaZ|GLGrHFbak>y~|5o)Y$!K-S#>TEl|{iMTO^0@5W z{n+`5Y}NO^aW^kE=z?E_f91&Ob=tedEVO8T4~qtiC7^jb&?n0Y>B&{4sUeoB2ydI1 z=2~asoBx^|LJyvpk%5pGbWLBHDX7fAWMn-jS)TMp(25X`+hd%^Rfo}Kb@~&A_XJlQ zwTh%(%x?Xg!wpH3Myt14*}x2=I_7rX{hSmJvaP8KqHrj7Qp*xz$@X8u(n&}7A=y6~ zwV9Y4L+Rz`V6ahtK_DH>(8Xwfk-{SAdLxlEzl>C%+Iy7gPwT8^NbY>fmbrNW30XQe zcX5mp%h~@@w&W-DwVN8agz>UIjne|%9~D6!V1P}t!r_MoTrU6o?T*j#=qBw4EOD!z zkLYpY3i2=)Bo|-1vCFY%-I-zR^*kG*lbx&}Rt5&b*VL1S&e}BdHM_p|`#3csjL#qO zoCMFWkeF0U5a?1VJGHZ7he#P`zQ#BT95;tdq2a+THUwHYWW@3MHj5$UbqsNLG|jL? 
z+uF61bsXgaVZJgXo2wIC0SH0^_^l$2ajM?IjyLV;ufR^r?HDXekR_S3X+&3^CE+3D z$`y8ILHZaxLt@C3?+E>5a(T{A8^A%ewij9d#a61i4mq+zKze^|aygo7(W?qzi47rl z8)0J+^DOY}@iGE1zENSeY?zYt>-!Apl-*;$HWoh9;d7pkJT{)Kokq0=ErKIkBZBj? z%I555h#h!;NY!C)P+eS4d6?a?uV*_yw;^jDjYC$P-|3AP#5L4YcuA(H{T|sFbTwos&cBz%kO1wdH0W`yAF65h<#QmkCsAj0e;l`*u!(64~U1AbC&h@no8f=3Y<9Wg7 zfTiaA;GEC)%NzQJ+rM%uHq!Eaa`(eaMx@draL4L|AUZG&nP>dk1oN6tCU?nEN!U>@dn2>n_~BZ{ay8zEz-wulRD z7hYrOd)>ZEXP%Bg%(xbHNoqTB%FIO~R= zmB$)Dkt>f>UHhas)-ilb>wbnYubY!Bd4K)LIaMFO&gEiUd9Kc9qxZ?Hcc*~+#uQo7 z;>u3UKd1@eeKtWaXIIn_#%&s&cIqM{63i}=Qi|vl6dW8N=y-&L96{}p4trLQ*ku;P z%1>KHK|(g`%IqMDGlhhXYroQ+lMH+v7I~oYqmFN-ee+Ydhit^#za}lWJOI1pAMcva za>wn3O(zAu(cosf0gM$jVY2&PmHgRtZsV}EFL{G6*F?NOHsiv~+`Niy9MGz?dH(p}vkKV5P@)K_*flpU^{kXDDQeF6qoNBxVbIxrnC3>t&q3m!c9VX{zi0 zBq2;no`j(ponM0z@7@kWlT0`o5IZWdl&k{zeQU34*0yetKKIit%LZa2y?w%B$c-SN z0Jr(7v+3QTA3oevF$1;RQ*>8ae~^|cfckV273;Lbn;1rB2sl-n@4l}oGp z=rX}TliIQU?gYOVv4P2jL#<1fxk=d-!<#vWUx<)KBLHH~6lJYLM6T^+95) zW;3{MzLpEN^J%mkY#U^CslvzaEO(jUhIjBsTg>VUA!NH~=g8?omk4bsD)k~=p96idGXe_^tW^cuq^AUrn^$*h?Zq?L+-H@b z#8no^O@&bS4-1p8HTrRtl~cVa&7XW|gbmZX<*2+MWkWY0$(@^gppLTJB>LbU>#Ich zbn;*g=Pp{(1JlnMch?PWMFqFNJ4VBk^q2?hOSaEMadj#k4^zpBP13hCVY<*6C0dsx zU&Qy8_i4upKp;}pZWC}4U=4kg+8N)&Dm||Iyg+iV1m^_*SW{V-xstV*nbly+{3TT3 zj7sG78afK7yVK~C&w5=QbZCwulF|-!xM@r2H{ghpl9p=bEv=OtikgXot>I%{P~qta zBbsLwPjbH^F_k>mo&zW2#1rOo)G+(~$!1kXM_`36>T)?Oy2U&2p_Ymh>z)xPkgo5P z7p!q*WriPP=LL$Bq# zv>}v8R@fgZk%|or%bRet(>xAinkfUGfxBPg8Z+L9lTgqb#(USv-Fca)`&jhc8}89I z)DnKvj%^Hdp>f0d0K~qw5c3~!apXdrqCr>cAcq*Vw6rU>N42xY#1pQm_>6b+;CTuq zGM_WSLXy(SPmI?!T;Nzg)vG&VH9@SyDO~b}xxV#-1D*F3%LmsQ{phR#!8ZnmCH^$b z-MV3o4J-5Q>h3jKT2{hchJf&GJHaXVI)Md19DZa zGSZwVJW6{mM4e2s%4B!(V&lY|QY&?P`N;78n!!ERA9@YqMQ@&|Q|3(>wJRoH-}q=+ zPYUMEM#)$F$VsJ!=$7*r$phGXWQeq{pG=i0ZQlyExs}Qei@~=@nzNz%WD=G)lYnB% zJnP6ewGc>+1ngJ(rF()K z*nZ+{!wn-L+H(SEP8|aXm^-GEmkZ7P!pq7vToBIv4tM91 zTL*q!WX&7~vJ?aC7<4CKjNSG;SW?B`Qamm9{Csm2D$WJ83= zXfNwId@E^dNR`#6e&~}OsPM3a-!-R8^<1anVK>?)Nb+&e1d?hb6^kD0!B;MYQJHmj 
z2#c$m-AcWa7Oq8qt&xp)@r(cN^_B=NfaOZ97H-MtG9FH;{DaM2T>u_tCS6Il#>Ds8 zo=_8Af>qqm6$zfi2fuGV9P~gTO1v$dmx0ZqWOKps>07F!sl+$D{WeU*D!p7GSL~C0 z%{JHXiY;6TJTttKI%#Xt6Od5@fAnXk_nJr+G`AaVM~go8BeFoW25F-bH#FT%u2YDQ zzPCmun=pf&Gl5bcV0gbRSe@~B=lM3o%9UNUQ{TeRT!6!XCwKwrN1;|S#Cm#Fbm>u{_6?fbp)lEfe9#d&2I%;^?s6n!Cgn?tn^+Ohk(GVI5P}m zirUqZX8No~SH7b5Rqd@RbAa{xWuB4@u27zA|4r1F=S88^Wa3?%;6+jdR)qPeIdEr( zQ)?A$ucGgSjwOZhfxtf{fBVh9CE}iszY;?8OA#>5xT_6)m(hV3f&5WXseVzk`eGR= zE$wveD3#w8-*s)$sjjXBskxQns;fNPfN+K3{lyo)pQe_A%e_@o9lSFT-0W`CYGO6g zs`sH+RX!wpNKafk)lLdnKAvrh`rue_j2pUqA;Jk!+nCos(M&NRR^S!%qk4EouBUhkI-nS^ZcaKDA+5`V5@ z>ggsgwP5sF`2f_hdA`hP^qcb$2&jJG2o}phn)1bJeqOZ&AWDe-BdZwop^iZf-n5T8n3`wt0XI9R1Vxw~v@3KHE9H?E|~0wWwu zNvl$=-h04&Fr8SvAdQe}B$AUdZnc2AE)h#z6s!jdcsYmjkp}>)Qw#X5j8T4U95(V_ z=$9n7%Mcl;``W#4DC*j6O39Fr!xJrV_-G^fnd?KoW)!`S=g?b*YyWSq>sCXcs77s^ z-0yG<>H;hWFJZ0Tp?ko|t0}t+k7;@)SS3ss0SIKw(&!$xEG$V%SF1sXl?znV_X}{m zj*g|X!d*SRBwrK#yQI>cCUW((Jd-+#acrKN2zuiAtUv$9VS5hpQNddtYfvdZyzm9M!piYI;q>Vf2VirOA}agHYuq=u+8jPEO( zeNawXw^$Gd6WV$*O23d*(#`r#UBfO7X2hXXuF%6B4rMZE!CIfXT6`R+M|^bc z;6xCC(4?bGA5|P%H!U)5j8H($)%2irQzaUB-e6A4npVC67?wm(AT!}xvn#f*WJ1Oo zy*cOxu-JtvR_{V&%V=vSatgK(7%q~Dh@fE(yu~&yuk&m=5dYepafnkd_0^Zjj_(3F z`Ww8)_a`9pY8Za!$*=>-9LZ6=sv*T2s0opusAY!8^@$>6JtWAb8#?;2TDK94TZt=N zp=!D)FqR^on{V)L^B5sLwOF5@KsqA{CFQr!MoT0DEc>CoV{jhBrt(MHy0=P{e7ARMF`y?Mwy)Y}Oli+1AN^22ViG&;qg z>&31)N^x3_RVf@wyzR{Cv#$rntSJO(Ms?In+7^i5KJ&%p71>E}iUiYu{?jtXu0cCJdTb?~gG zIom@+8+N5b^x${cNo#&B@k1pQ>-DjZm~;|6@y<^pkiEnn(HD~%nj{HXb04YQYL0t8 zSj;lG`<|*MHh(okc_m-G&v^V+6p6zArK8yVrrIB8u}Hg(x6)}c+vvyB2S@Sr5ph#s zPfLHF?)J7F8I@ridABe`zF#a}aA^G{4-q+l9UhK)ZwtP9G#`Zhk(w1!hkkX7HVY-6 zBw2`>k-K~GJygX+O^ae?dJBgQMC^$AO^fuM@%cnv4At8DNvZmR+vpL@0@TD%rVEKs z+lTt{eRWa1a5A`2vp3M!>8<_Fjjxp*CJMSONK1ll@f#XfF+>*$#O*J!=L6r-<={Sp zM5WpZG;B*V5clFA3)u^=aqEkpiFOclf(H{K%9aq3zJ>!-w@z>&QxloU38?z95J5W%OO)&bOy)%efDzh678fJ@4L-Pd}nL@@YU3D|nhT`{pa82J>@A z&R@hQ(9L>GhLG|MbJe9%M&6}+DF059?|G8f1JYL1dwu1KRZU%Im z(S0kd;SmdSs{Vw-k6ENdlPc#tHbX+6A3AR}u<-S}ggt5{W3lV8y`b%((jSxr7xJN 
zfS_ofUN|=4U1X`PYDayxUS&$MrA8_5!uo3>T2+CaN>M8pw`iCM6(ON{XGYt**)4nr zrHiNO{t8*nFP@2blwIZ4I;CGd3IK_g>#uuqT~K)C;AcvS`owuOitdRc3_qxeEb{WI>QidqSFl?TQtLP4o5{?!U3Nb5{BUxb{l4G(D%ml5wf!fZ|p=t@8 zh!3I63{}igmN9jf7-8iC%fh8}gPu+$Wl~yJQL*^=71%LDLN^BJo2mTgA(nirQ?z_^ zVf2C1@J|t~kA!WG$pzBnF$k4t@yb*`>r|u`U9I0}q3}Nnt4pCu-aaoGkQ5r$EZ4qu z`SShOraUQx-^b3k!<~?ZPDGUywmu;WN&*vwLeH^=u$V|Yn&by3ZapuzG-m$&t^#R+ z5&z{Gd5bNCM7?nE{&0N}4;^T8W$TeQvzh|w&@_@pbVI;!F?Q0R84IBDx9p?+@IFW;XTG` zL*%eY%`4wRH*n2FEuHysKegSXe4&yc8>pqJ@YPGm3C7z0TB$bpc!@wwD^PkY;dX(klbuQ!s zL2e}bxVI%6<`Y%^HNWnCm83gvTijLhKO^MxeKlK~ly`cd2~BXcs@_1n9m`t*D z;Z_>w2;ZUQe0nl241%DI!czOi7acH!*WhRHDBY#V+FZVkX)S$zUw4G@mJ|p|7_Tg9 ze9@+!IPf{{IyE}Iv!U+I1l70Gv#kgf2QU*8->V5e9{aUxlHtsog8xf67ccTTI?94Yp-1jvfv3T~Yg))@U0*E)3R^%aNVl|7BwMojJ%B~K`3?v{Q zO?s_){EIfN%DUhgxgXN=+0^VKx?G8BEY}5XHF{SoB||- zuJ~!k?020GUC?(((5f134{c_Hd47l^fki}$r89js8p-sWn~RGnFq=MvZtet^3!!9b zCzU0(tW|3a=&@2T(*G#D!nKon3i+L)PVU1Qv4D^(KFuU!ba8GiTW(R!XAzlwNGs>J zMr?S-fsN-^jqJx5Rqsv9DpO2a%GV-CQ*1712ZQ%Hr+KuL#Wd%M;p(Q2X7eumT#TI91cR@IjXeogX_+dg z)keKT(`IEZjzI1@@^u@zbu);}7>hp0uderx5JA z1fA@M29zmmb+=?r&o4OwyuEN+c7dP15cV%ce9=r`d#P$G2X4vpyRWq3J*X}{1uSL1 z9E&9+Rn@-Gkz)=RX7Eru*2t-k z#5QI*FGEG{_gdwD3>OJglW7xr6kN1|N7~8#<*H-V2AeMn3or;_S#fl!vcY6`5N@-$#%s#`9H76F*vfj>-*>ltZ2n6#NKF z@qtHP96-R{7*wdMuFKU;4sBANAiLlDC1D;!HLiZtfKg0K%S??ixhKZWWhZ2F)c7e7 z;GC&3N0IF5sMSSzd zL9^ZYYg+M+xbA&B{@bu`(%x@17x8M62{~S82+_e&erB}88PSr5AutAh6U=3Qo#p1u z|3T{S3sOb!_Zdn8AM+W_kq640$&dji3aMH!S?4WrqMIIRQyzPX;vXaiaGp8FH0L?g zy_j7jzg}{5l@%>HYfSvm$B!)3?NEq(BGWD>!iWKXqQ6pFAzay4vlgQlrIS{C4|(}1 zyNj=QfSr6sPoAneOWUILC5h%-qTr<8uwnaYj#xU%L@*-s~t`C@rIW2)TkjSw`2%E9YXL48Frg zu9smQf8-8-<2Ev~XDYI+C#M&46z_|(uR(@u!K?MuNN@ib{qkCwSjSH_2pF(uY8EdWy<1b%)n;7EZjM#?5eF zgl2+W*IR3m!MA>ZE?<$I!)gMOJkrUb3BL-80omSjV%f6YGOspu=|HzMM4|t#3Q))7 zSb*)r0uuic^nqMye!P-(V(f=f>5qXaekl6h0ueBn{gOnl6lM6-HA zLOpnuF9daz+WSY5Q!;g0+`!2x{QzL^tMXEzkB0#xs68KJ%MsD0@1u1Hjb_M2>MoG} z{nvW6`1TX89<6OfA3&gH;bTk@5yLtIWol-v;e}62OhL_HJhv{# 
z9c%|(I%c*W+yC;7RGVv@1mAbG!gU5;!1AN{N#gRa*8W3{B6z(vWT?nayUqHj!)QBQ zdxaK<4v7jZfA5($)iXV$ER-?jWrWiK)P>xa5210fWyW`Z)FvfWlCs_NurVOb@u}DC+309@9(g`lg+ATjWhoLIX4Z5e=lyylYUH;@2$e$vJYt z;rW)$%0?jn^b*g#Grp}ynA#KOC>euRax`79k+`MdOPu6Z zm}H7qZ|EiDkmFrVfX(jrTQ9|q?l6+s`URIc8-YPKSt#os{~*iKX%QXb3E7r6*f*RR z;=W0&7;w>a(izdeyJdYOZpFf9q!zF59T&m1+nu<|>Hi7mLC2Dm3}JHHSYKY*flRhH z-A3H~>aK+Fc=GC#jz~nP@9Q1851P#j=ctzJe9~2{U)1q5e{eknV z|Gd2j0yjD4uoUx&1mwXfPSrrQ2zkWUDPlENlJ-T)c7%AZ1WU}0{lI8MpS>m$L%c}$ z)#^5UQ{|^ls-PU-*gaI#Fwa}AIJMqbEdBjzmEIH@m{jYF(yj+~HSjD^B zoB_D1YIW>%M;WsdTCPS1_hh=S`?Bg%Y5;ah+lzhDz*uV9JXHo}L9_D6x#h}YDN7xq5gWZ*9I^FCIpV_yccXPC}W#oH0x zxJ^o#chtX*tk!dPXEriaQ$Cpb()|3TPDNl>Ly?hSE|III?f^BUyK%|1DLQzPLzfi? z+Q3pKgw)nhyG)IwXif{LDoZnilt+gl=+d}@X_?qIaeS94}s1Q#-flEx9MKvigcqM$u^+p)+&NG~5KGSu2uwEOr!2ih3UR^C~GH zmazJAB4JC;ilQU07cMO2Q)cMVw`-tZO^S~r&EA*%iLr>t+)K*>P- zi}|O-!yXKHeiZg|h+ws^u?-QcgYGWwVWFo^AHVxq(A=mo`F&hH;YayM(RSio`g1^7 zbAX!doX{S|t0Yvst#sjX+mv&Ky4*6?+7##WiMp#nGP>yxI82&*t!EnF3<}br{PKI2 zhVqq{@tt=xv0Kh`L-``vy|;v_hM$Nctz7U*&0QhW#s+De6-Gs>{;t=dX~Y@as*S z*+<=c>9;Lv(CLbv_M_CLPYkZ)iCGkqb1R5oyLg&qfQ7egXcJ&u=IJ&kC_|m1;If&R zpV_2YABelMkcEK+q*06acfIX9s)%Y^%UFjv`MlG-ei^ZBI|1U$0J^Tu(bne}CFqt@ zjQq)1Oj0H>>-9@~%@^>g5KgA^(UXAz;hpPv@BL>oMc%68HwU4s)v+oj+u~32;ruai z=CH&RT_C~hG#%tpIUg)Phqk(+eXQIS)$6A>_Syj~`AYIa8Y@y(a7Ybu!sXzLJLn-DfLsKY)Dl zKZ%n7r#yBP>?mpG8?(Ey52Q%+8DV>|)2*7e+DtD3t!Th^FcD}?dB{dnMUQ?n_$C!N z)u6g+!@@e$4=qPY{Ri)R)z7bslcGv>gvLI&p%C3vLCul}n1VY|h+kuwitBcE5OtqJ z7)$!LzNRnEJ~W3r$oNbn*_F0x>#X>9DD`ZbSgX>g$YkKZ(6L};pg0_$>84l3!kb5( zlnN1R`}Fx7wZ*Zsh!CZ}Id52Z!>`UF5U$k~H|-%EPZN~d8&-IcD_3jDJ-R-O2bDP` z-^C3xa8Z&3-+y5SEr5i+5Q@N9K7K5_3{o5U=r98*=K1^4r>rz>sgzSH&1XcVdn!%P z-q4|_LTbru8sT?n4*CE2z)H`DwaUMId=jdtUbrbeJv3a~YL5$bGbAB(a!ngyNm0y*GAfXHFy&H>fO95v zqK8p`B51lQZwkR}9O%b<&#EoMY!5{Nph4&zF6Zt$+8LIB(iu^ro_5NED*{d0vdcCI zvYHVT6c(xHkco;Z9PPIS1_H|NX}Y8f7Zf9*o)UWDs#;~Q^OlFGQi+Nf0Cx#lc3G?S+4Z&@miJGwu69r zNstbdfIlE|oK+&aZR=`|t0cVr%T`+ir6B;@4h(*hFn*cHj<1TNtNo0QY3 
zDP{JM=O*H|x(AFI=HIYl+4F-y=SP(bI|iFOp7`tZFm%q+L#=vA=(pG(_c|Y%H1r~~ zj4#b`u7U~HEZ-+SKxbj*GbpjFq3va_uS667dd zABx3yZpe3IfSyJXDJqLC;e7PyT5q7sZxC5@eZNA~%wDejVF&d=d{|25aG=q615yd2 z2DpOg$jAz5y=^GYL;D1}(7nL9poc-s?yjUDO$b*oZD2XMJduBHhc-CEgtVugsgrrv zm2ca{3Ml>^;GW@Ncbj{F%8Iv)+319Qlz`6IDYlQx9&F4G1#@*X>q=vCK>On4D6gj< z%eo2Ju1~~P+8SyvjyrGx&MycxKwfe%R&eN(u4i#eYk?S+|e&N5kc}D)WvXiX_!bqZ8^BjIKn6IH3C#4bSkFm2)D7nF-YjHN=s@*1hEw>S=LRwMK1L!7X zgx1I&$CPIkSX}1NBwb63k>NeUnAZx*?$}pd-hEAFmxXPjb$mfg+4IPsXKC8reW+=uP%Bm7OOm_HJ=( z-@+vwX2qT~XbaZzBcN4>J;Sb&I)cK=%CuzzYUaub_sLGQH*3vc;I^VCf{ZRozFD@O zmTh)vN=3+~i}9v_P+e+XHObtZPa$8133Tmt+%w@Vd8r6liRbpB9I@!ym5jC!k%CHD z<~u%=o9nrC`V5yXYWOZ%$j5wBPg!Uks}Z48qIm}xRWO@!qcBs5_Qtg}94TszO>s$l z!E#|qK!bk=K;ec5Ib?sn$d`Zp%E9%3xZ#k?y9N~{_K5w>aQNc2J(@Eudy{hQK(CSY z^^9STkq=sTw%s?|m>q9hq-e3>P(B;eYbRg>S#~ix37-UWsmJS{-pY|-!Xiwr*}Ivk zA$Eok@~fo3PD8OaDEpxVgMq&Y$)36%Hn0Q+`^lbgEoiqaVg2EEZ}%7piRf9 z7s7q@xzF@b-d^p|@}n)_#-D$oFy-9hBq;(vt%UP5Y!|ZY(wS-y!q~NCFDO|#q^N;8 zqBvvK0I0f2^hGaC{6;5GzzxDo-2FitEBDjHV-h@RIFV}{-8Xy^q;Ci&-7Ox zf}?Y``RmYjVaRhGi@(SxQsc~OmMMc|bhTcO$%I?NJ{zyY#E}jIPYU=%4w$dewg@}6%Jd{(p3T7vek6%#5>hpj?RkFY7teT6Ogp=yi+ z5$l!p1FJXknxFxR;MO1wq8mlcu+?8T;^V|4Ts_w$jota}-M}4JD*}~yeLHvfZd&`p zi0)CKl2zwysM1(@MP^Aebu{T;W#sMMTLab6Eb~ZWmzI2PVnJEMs}mR9p$+`TaQA|| zt0gHr`XFrkeY%kVv)qXx83rm2vMOQEWGKRjcsNaH_OzAGzyQ-^%0>6jidPK<47_0k ze6x+*`_2y%gSU+-b*K+tO@Wrx&*E9=ccZgx!(Fvo zYcs^QHgs{4P$a|MsRbcw>UbD^WDClOGvd?MJic%Om#>HkuZ;1r*L(b2GhE||kdg`% zhYDmc(YG=nw>cUT%YjJVh43*e){*iswk*3?ohT$kDFq{+)@K4F3M#+}rtJ_IuLHwk z1Gz3qJ8YQ9p;QH}!{f-?j%zzh5`dL9=B;nj5tnu9T4}dX7$K8st<6wK<#&V>L%DIq z2hIonUDFMqt1|k)<4Jp40dv>PJKjeNnqdp2Y~Hm6)08{GU#t5V=Vm{JN`0sYHC>?h zq9{9Mz|DW8`^{5{u=`4rQTA1ir@dxYNHI*Ft|gZe@)iX<54a-{ix)VkD1#o^$|UGA zxRVTe3#&k6*-qmP;LBO*C;OxPW;LNdrg3Tgdf}V$84EO&{6!cgMHKx_fOQ5JfD$k< zukePVA-B4q6io&O)e=j!N-z3uxDtFpd z4U1dPM*fp0XF5;qZ4zgo)PFfZD=smlUxAw1KWH1(;oS3{sJYW-x|WcQZkw?{ zG7j+p`VF_<#&IN;hG#wkLVixA>3Arpap8LU#T@W;mu5Ije_3bf3N2zy`0SDpT!+rjN`f249u$S-i})v|RM=eY_*;ESkwO+Gjv;N(I 
z2XQqBw{r;^vEPrU`X!7f$8P1H^HZpHPtFVO8tA@dR+?Wx*)`!0zFEMs%-{lhz?Di2 z!=6jvVQNr}&%*Rtd0q}-fc^7TxYi3O_ZKNQ$*7iQDG|O%Bq|OIEO-=SrqjFhIZ>Cv z=qN>xar8{FOe?hEFTjk!TsXW{CbpIN)AW5qkfmC??moMdu;w;@6*Z-v*e1w-y;$nG(w4yc@(@2O;kqb8s>9!#OJjG#4uuWDGd?tZ(U z=Nw=&Xt@)&9R&CMAI$7c@-6JD*?+bLY~a&fBqQ|d4U&~o{2?@hYupl^r7+i~eXU3o zl?1Hn7w<+K9dNMkvDRR>l9bH}rNxbM(Qrjef3WJ2t{P#C3DVCUpGQy6+FHf$qfHFJ zF-08{M7O3T^(rJUD{BMhf~euI=y$xs{HuhpJy6Hpewf=g3@gzn?`tWw+Jy z`tY?RX~+RmWS1&6ybTh80Q2Ry{A4}LS&y?y8`e`VciMmzueYB-@?DDlr~H-wOI0~Y zzxkTaY?_OkCz4y>!9o6{!I)?S>*#tMc)ZCN6@`@FnKS*4p^+zg?|loLs( zp!TI#2gu)>k8U5z^C^gTPw8zCwN;{zC5X}W(<4-G+Ra5jB>J6d zkJ&9fl=PD;Nuy8a)3iL z;7_D;+u4DkIUq8Vl_QvvXF1hxN4a=co89^s&0idLlM)J@4hj{mRHoHzutdij{`B{B z;#}S|(sj+opXF_VM9P|1n>swxB2Oc_Cy7{HB142NOeDdys0-I2a?2l;J3pF2B-TyQ zr1wJ>v|HCk1T!lSuIp@;w*P4es9HT=6BR?%10m ze26kwzZSV(IoT}QVKd~9U+$ncf&Yml1P-f^lTWd1wO%@Io}f?w%9q*`uwo=ec7dNp z8AnQt#3yH^|AHu^1js+000aW6WjD(}+H|)S@T#H*S*YC_+fu$;C|iz{;_|FwI8?D~ zBipATPV4DD7a+xd1#qn_N$pIywR0Nb+AF`rE%7)U5*f;w*&I9?XHR%ha4?-FztyB* z=cvedh}^wg_)Q5_uLV6T7*fPFSjKQ#q9$9NBq36BY14eX-6~b@JPIk=k@PBe`;`5- z_nga_Nh)E+ehJ4JKs&H>ZNozBfuSZtRr@?41ADd5zO}s2dg9da@-TC6(K_-%S}$yMYgRg znbKEC%QhbuUtuz4?+P(b-u%U=6sub6re`2VvN26VJ!rx3nTmJDrz;XY1VkhMH_KnQ zVD#SHf|+Fi*Nm5*)tuOs6}XyNuo|Btv*rae(M|&3>Fi;?qclV&Z2a9GV15%I!BQwT z1kCY{K)b6DMe}%bf*-(6A~h-^Ud_T>4xNk*(J7{|1SdddYugc}I(%SbWq`pyv@YA$ zu;f+nPqB5vbgY2G#~%$Wj1)*?^R0MVn-)?jdy)3eZy(=Oo+LMFL>p2&i`|}|1e`|i zJnLe6mh4DGQ0c&jG{ia0U!2FB7M+Tw4t033kuART8T_qdC67p$VuYEr~)dpH~6xM=_ekfHHJyk_T+dZEWi0W#c zTw2;+!$bn3M`iBk!$VYIPfru{l=p_ZnoBz|uvoN#f~v_LCKL0QDD!Xk6pmDRHmX=L zwyUGYoT}!gJmPNwr}HII!dz4>j3yTnxgk_#ZxL^3;th83VNR08cyDPZxodKBrFU@O z)kwSU0Ax{z&LX=^BJu6-GTjF(4jdGc7~nClCGEuzXp#nCc}OdwvlLr-)i|8G{Kpun4-`&vFF53j;M1+n z7Y-IwGn{|OKWC|xGQ4}`f>^}9@q05rXQ!_5_w;#@ob}-=G_Wda1+%aoA%PAOk29+3 zkLqD2e80pKvL2BN!r|5VXCXpGrw<&SaW9lx+oiJnbL-fKoC@GylJwY4yWyCpdKX53 z@l7n5*YUP`%xOmQ>_wkV#hcE}Zkn*lDQY~~kAw2_`V~UZ&xVKq`#%;dqf%i>Mw}b+ z9;G~Y)3v5{CWP7Hf#;)qJwG7W@$M0U 
zx1VexWbzvSmT5zvm$0IQxLRa-@aZ!EN_R6F;EB=TAoICV1+lgH2KYD(iQ zVYjj0fy_@JqP9-<|D*n>d4N_RI&~8_Etq)c#!PLt5FI?1LP7nHsx5potoJJ9xCjqj zrh*wH;g+rj>a;ue)VQA~6|aMztbmtBk@GIYhn3cXO&QRJcopjBk|2*mKerQl;WvU< zrc-kL&=96qp3LblMDI8Yn`gF3{Ulz**%#2PoU?=cX^9#`aL8~}`}2*K^g{hd1Ne&w zpyme!Lg^7R(kodVSo7WLS_2qn^Is?XrY&EjQ;R=FD(qr>#dJ%KW9GF-+%+?6;5 zaP6r?DB9I$qYaf|nD1qF!cT#PY_MH>l0*_`;k(imP%VZ^hL%MCNy7 z>mf%r%@e$sD6DX6naXi&L&w@r%KMBxTa^cqFf-f_Fog3i89}V?|JJ&Ds{F(F(2`w| zpbSJ!j&RJJOESOb@lQ(oH0V_ZJL^VhXSX7)l75#Lch5Qml)&orJcKaN(*f9uyy*^v z)X=cFVxqC)&b*2$j)d^Kr2SKw)8#qT>~HWA@{W=W(ic909Ep7RuV!dcxa`7ZBe zSI#`)b8OeN18#}D`gE+Z!Pbw^rbyI3cdtHvRbfLQfv$;=$7|WD7p$lGjTfRX+lW%) zi`asvPUIz*^~>3g<@tM4z+y5`A`&ZCgN^lX0Q-Eal8N#6FyNqXq>&5t$A}FZ=eVMK zrMrVvP|K+`vTint0SWAL`=up>W?$VgpZ?O(!itxuejHK_Hk|+l@=%Bpj~i@v$V?=| zisMW}K1xzX0Mm_gvlH13|9V{QkV87YU z=EJ78mm_~^M9Rn^T;!HF_lt^QVK4&Z2Yuj8*Jy&E^aj1s^g2ytQi-8$Jy1lOnr2FTR>CvP?Ak-&(RqOTW;1O-yGA7du*h;xM8qZ!FlkQq#_QkJugM}Za1 zQen{!x1DUNB-e&%j?1#?qm4I+y~zB%ZKCTRy$Z`4k+-o{T8$N9nh62swNQKJfDhg;}{pM0<L1`d<85eH1pn5@S1XVAKhj@glEa*qFw2Nck69KX78bBxb!0Ax;T$D z7xe+clM62`-dV4_jo?w(Q)xkCCwIVk!yU&~VazAs-f2%GQi4^bLW7PlOL$nNPGT68~^Ipc~c;n^=gUq_8l z_k%Jy@{1*0yTvR*6XB+%6rS2hGY&AkR91)$Nuf*##Q>}mZI}S!Ajqwk-=4nIdPxRb4~-imD$9(c;5St%Tj-Bnd!s6KrSk(|zUMF?~y zB^Rp6+QH_jV}ZUhAq`gH$hi1>T)ykg1ias@9j|m1~!jF=C&6=I7&Lo*M z-4LHd;-!_{Slj#5MiK>_)^NSIdlZw-F3B-$Fx73-jgjrvkdRingtGtoTkxY^CK(Ji zcUFS|hz&*db`Cto5Fmcz|C*82`kaygSfBc1`3Z`8{rjiityV_=pWegV&4H#Y3@j%eXDELST&yDD=ktj|7<5_T%8AE(bu z$oi&Fo`xTJVE8NTE=HnCwM{n1RwR*Nk(S%sXist~+Xeob*B{urDaoXbdEF%?ekuqO zJbRaOYvGKHtyU?JX@JA@+ZH^SS8lBQi>3?=&A?)F#5~^Sucs-x_Tnly`-J}cBitqk zYtx_sUw5!6(W2RdMXXn`926*Lp;8@&Xt zDDqu}BZxOtQwL~z%rXX0+*TzV_MQZ?+9!>^K8O@_3cu`Xx7!fhsk3eZI@fnS3NTHU}gZJ6g1z3<`Q*V8$5 z@;a8Wzo<8OSwDKanG6vZny}MWY>}lwms8D!%pXabQ3~9|vX4KO_Bbp!vVF)HJV0|L z6(9Sra*m_Z0x;cn!_10)fou|>ea51wkr5YI*Oi9V9kaBG*uNSdjbvi8v}cAmU_WEu z7AM2A{a+yR)KmIq*3hU}u)mt`z>??DFo(td*E%VPFqX5c{;WmxO!js%Xlz{T5WhGI 
zv#xOcCN{BX)>8yj@nD$?iiRSw(p|BUKXOB=`AFtkYf^1i^9S*j!)-$KZjAJzMh%Pe-Uc*uAsP`#A93?W$tZxJ)-BSq0zX z8JgeTXKv+S4jAzR)f#D!SSV+T+&7IWVnVh*=+2Q9Z3NC#0y>p22kp*kY#f@$?27yj zl_#*8GgE{{8HO33q-RPgl{LrF$oL6LgobG`3+=4I@RwA$5y;F^>~%+-W=U8Ykf5aE zOC_nLqoY-BlctgmNsu|n?Km7Dd#>}@YMSIT6!KA8tW$6gZr3ohoHY_XRM0PoWkf&I z3t8Jk!SQb+qqOhLTUl=?A_Ht@p;}9MRxo5DNT1S|9>^>b$bW2H;S)HaNMfAiII9V5 zL5extAu)PF_%vnVnT`ah6L1AF2C-?0OK=|$>9L>e~Ay)+=WBO>j(>M z?;SDb{nt#6_c9gAaD<1>7qf#i6uZlG@|#)bq0|X_Om09BkeL?gE(~=MSf-1eO3MV~ z;thZ1LrwmA4%EypTlA7N!1{RU)uji-+3j-t_gh2E@ThS8Dlpav6t=9tibV)oTFkS< zl##a7Wv{mlX*SGzag>-B^(SlHO2v>Pah>Iritmb9s(v;wi zO#;={P(h01d$P1!Gc~D-YEe9|b~>+#+;?A_3RML(ezx)o5NCo%kPnM#|7_)*J|Fnr zcVPo>Z(jqDjAHX~2v!;x%jGPD@ovSprooP2wX>(~!CWqvh-JGuGYDUU0Zg7+ATO1tbYZl8w*D6CHwBNt zV39go#8^@Zg@-w%*+bVYFB1P{g09%#yEr`b@lnp)lU+IdLIBklH9NivhFN(sCzjxBup9#-TT(``8sw4IN zEZ5|KIKtSz=J~>M zgiD8#PGMF9u-XE{|KMMC#0bvH%ZJ6kl=Lj_j8~(=TNMbN4$hj@4K`?2-zB*+ogW1m zKYjOyE6q#+MPX{ki`!lOU`fyj#KxC zr-#?Txh<)sAbT2YrTLEa485srnI@^@Xvy}YQvWAWDeud}%{5?Z!QhI_k30zf!sG&M z(pm=+?v}B0`Z*F8CDCihl4m<9hEdYK<7+-zE+=8hROK;@8fnKhr)rghYBQ0~(v*eM z!o4tQPf?7W0Cd;iIV3JwG#$(uhVZ|eZQsChHK++-Ihv3=hbk#{^txIHOkPs7qE|kP zz*0y-4o$gTNxMmH5DsJ*Ho#_f^`Qh#ulIzkkm)>yV}(onU(mq&4lIY-AW@EFx5B=r zwNq`7TTmr_T7;!hS#*h%1rKx3U?G`E?MOit5u3{mWmR?DRGfQ$ih2LSBb0gY>3kpZ1aFPN@q(kbEp;g{Bz6lFoEAs}L-iHb+lkBkha;zT=W9*BGH-Ieu#Hg99?nn8)OlRdl zqmLB+bz2W75~5q89$iK zKIzjxs8$40Q?{F^UssXNHx_f0>UhtvXmH5YH7u9m>}BqGvkhCF?tb3y19aS#MqiQK zS+Ec=C`{3;|J-y*S$m<74H+9P8ryJ%+Z+VHhpazFlo2I2ozh}c(S9N0O%1Wk&IFkz zYz}Pvc}P)2Y;1?5ORYir@^iz$&L>0zAnQd?PbTu)LJ=0%Qub4_0mCOZ-H-BpCteOx zMU#{Sdf2h2Au$EB2uJCuz2K9&KrJP_(3h~1gpPw8sA>oAuYixPySa_3?IrD;Npkaa z+a~|{=iUwR&Gco|x~}LWhCQ&V4k8~@vIN7;3J`Y&X#VH&?N}(KTw$x$yQU!SUJmO! 
zKl4I|)LhmZPUgchz?dWR=$X|&k#m6 zjaQ*P@IQuwA{P?!SEO|19y=PQ6e+MGCVJNkw0}TsNiZhWmuKUi@$R}iih&&z($*k_gUolNv~SUe^e%7KRypW1=va;r$qR)~ z>on}_KL3BQ%LA7o?5`eLh`FQwTq~U6f$Nl%=#EB7c%2J<7^x=pT1?*(XmF}lv$?}J zL9nQ4p0#+w4&?@~(s|8?tPn!v*FduT6}Qhjxa&D|%?fvWg$ftL?cp3W;13Fuorh9o zrxmhjg7t)Ue0|9-in%%+{C>Nza2Jw9YJYEa+?(5nzcyOFs6`5zQ$AL#o z-*&P+x|E6tmTo=(9s)~=9CHOJv&vn+dC12Pn3#Nq4;`eTYso9;SAZ92xkND(%Wr-=$rlaQ3Oemu5O*v8#D3N3gIY z&pC2dwY7|@nb=GljK-BJnJp>F*`PEI_&+a*`yZZ%Z3!~-3bJMgb)-(>R6m<}xTN%R zJk#4vWuyWu2rD*M-eeLXN$%|K{S`J4yciqr(eGkia+L&?pd6IgS^gl8d4!D>>z978 zHROCy;>cR!T;r&^J~cTZJLJAZgVHLV?&3a;r)Ek@|;BS}CNU5G8EzTaWjy|J- zM0;;!KWMZ)PAuu!z5<`BQ3b!qA3se$QHmcgopfBA;oW z?v}6II|+=X$qu6Wl;*z5{vJDQE~(3IBs{ew&O?r8erL>v3m^IM{6Tsfrn>9x!?p{} zWk%DY=F+58g=aIM1La8xNDz(c2M?QCq2r5g@5nNMY?IWm-z2BJGLEc{jHExM#X4Ei zhQG;<40$$)lNn0tAG395tI^qa$JsI)&I*Dilvt1;4>zcizu&8yDx-~nu5(SX0`s7n zk^W&(r85id%YAdfO6-<=f=M?yBt)D8z>T{g&3bE%XC>ohF06t#?P=C7@{ca_`@exR zmZgbEZ^JtCfHN!5ifn>{b)@eqen8b}wth*pk(IjV_ zutR?}x+R%nhazV!kvF%|7dEY1c~yTi=gmv48ps8om;HD_4KYXkor+;5-4>krxKUWT zq9Jc1ao2LnR*b4P-h4tUfw`u@RlsF3x4s9J9qN2EUfmp4y(YF&wCxj_TN~bb?1&7oS)ZRWW?j8$Md8 zdVm{Zo<7z7b>4x(2|lH)x1WIU#)fV1_jv`lp*F3(Uq#;C zWBI!TuW5#!+Ktprp}~pg3K%J|WFw_7y9u+3!be>qSQ(9ow^c-vCmW);#PLnGno))x zC5x(gOxVIhDtG930m^a`E0Q9$Hvd2bcGD0$c{#vw_2+57zVJCE4jUxyPs%Q93Fihp zuf(Ng*|jd7Wt0miJ8XmgNJ`@BtzdRBtK-Y3;TSu-`0B_y%*4viwiCI^@2t}j{SlMn zDTw#xD{hYx5JLC@N+hnHn?;W2M0+|cdTlk3vmU^3YmQns!xkV`O zNKLIA1~udrx*0IF71ADiFHkM#qCN2ToF%egy~6EJP@s9jE$d8brk|&R-N$%6LaZE% zeM8|JCwBHrUOhm&!JEQvkDbm{g8`lyjl=T&X*BB~xMLrtsmQ_Jbn8X>6RU2kj|O_; z=Chc@kZ4K0poky?h0zR`LI)X&GPuQ8;=>QhFlkq{^WBbqS1rTOnnI=r47M@FMK+}e zdac)ZYXmWn_i^xIKkkpTBK@|XYDCT1-J@34>4$%UdX+Ht$BfQ&u|48)lS~~y#@h33 z`9;&3x_b?Pu1S=BP*Xc;v_I=~axBo;K|9ZoR@A zJ+W&=yM+=P(D2%E*kZVjuB82JZgBpIS)t{jkC|(#wf%5e{or^vti$9Ud9 z&n!wmgHUFxm%ZLTz#n%w$RGr$Hq=jPk8UEBu@mzcxZdhN36jIz4o=*KS_LUF$}^ZI z*?kHueK5{4Jn^ka)_ra2OS~mUo`|ysY!$5QAqy&Yw^)`iSj7!b(_MVEzcgYyl{&lb zjXF3qH^KQHIArWw_z^=@%G}cQfimCi=z7uy7>JT4mA<$9M#w0gQ|ZI3@ym4}QF_y- 
zn`e)oo1)rGo&=8zj`I_EuVcj6X_{ep3#<$07;;2%#iW~YCfJ>S8>8#O>soCHClFOj z0us>OprOqjhyaaQi}Yj*XwCxx+{EI8hZC6RH!rJh_C!pS7&f68*8HIfG%)L789}c# zMg&to!nJ$;j{aNVDc-5G%A7Z1FRZo`^c4t*K%KX>+88cd$27zzhJvs05Ab<2kg$jl zGu=mFvA{H;>=xHKGb7jbAQ6)j5UUS|Vy2RtUDP%^GLr{Xfi;JHhpuUV*_P@3AF?Ef zKzu|ql);l5wp}*zMh5v7XeQ`mQpT525!G>lR{C=l0D>HOqJ@63X^@IeVpfPxO0j|F zK+KB>J^}H`GqEVnQfla~4IACr%A%lIOg$aH9cw=XWE}cgvmG@14OIco2QCR3 zd8>r4Z3umC>x}+gVgjW}2|RF869F|3l_bTW1f~7vXc}RT2Q-*H`#^(*D<7{TOEa8! zzLM>Wgy2mMvpNBKI2cmXg4PVBqMn@lU`m#@kPG<3|& zh|fFk;oCkQvY=`k03qvj{nvIvO~^k{eDTJ zJyzJi5~N0oS{e7{E11s_R->x=1vKcAZ#oY%`vuX-0e;FxnO(~SCQomVRPr!Ng1HaT zJS@%(1?`C{pszf~u)`rbKIrorDKU)|;Qhf|WZfi@Wr3m0z%xSNTWrEP@EYcm;&;Uh z5p`lhO7?)g3TI9IA}o+)y>?203BVtdk`Y0WC+jh?>@vjb!^j#M?JTVYJD()>eX z*eVYeV21D^32v)#|42)|)+LntAoTG@4T}R)7eObe>oc@TUOx~>6(l0VQVtb?>hu9T zU~q7uwC;xuxzbVFmzozIlA;Zc+{9-gN&$?g1fN@W%)=<39#u!rzfZVVUy=D9D#zVg zmRX1h6DDETd77_S1zzQh{iHC-&6tvXcZb~t`c189_yUB+LE}38L;gyMPnVkKo@e-` z#DSF1)Wli#=yQ^7H=w+EZ4L0%QEpqd6pWF0} zgKI)gsk9nZL)~{h`#KG|G!Quaf~%l&{rt#OP1n@2W=rRle$_vzC)gFjgUELNDU5!_ z(o^Us)6-zcOr;(wtqw$|*r!+U&PUB~P1dl+I;}!9TH9N>#x|l2*(5Mq?lTC8Gjfq1 z7^9iYjM7W)-*m)59oEp*j!QBVwAH-Vo^n9415Rb+uU@m0dz>VjiFjNOo{n|+Zrp^d? zLpvlX8kXWEqKFf(o(vhf

?vrTZNi-@Nm9c{tw@jVXu`RbkYp5?Q!5 znZKOL7Q83%aSBcoS-i|G{by{4{D_q>i875 zn8-<8%vAv$D1im$nQPkT4h!)ip9<=8Jq$lNItfvBJr>tT>*`Dv&Z000 z7B%`IwTf5rKlFV`w;BHtOKjCnRr|+kyyoDMKb|6pN5;@|_7aRn7!(Db$)i#w=jU&B zr!_ZL2qQ-;Bd(Ye|F4b_{Uf}DmvEDq;2xq!{;3SzSgE<~HN-(Etz5@VPq{7HLyEN+ zf3hPvY3usc5Ec(t{@uR7!AfRm-Mr_713bm`Wah8>ALQOM4eTUQEC0H$$D%ZK9J^4I ztsDw%A8!401T|0E^f2m~p`SXvsXwOa#lS2FO2BKQY6LP!xzA=SIuJ0#MM#x$kX`fBFU@h8n%MAg2%@&SP*Wao?|p{vDzmtncG zEASuiG&Tv!@6P(-`CcWk`wXI!(S0+TWQg+k$@jB-ZD-4;H(`+GQQJDq0 z^-Y+J&oY9jL}NIzJ6`2sF~Ru-v=q+s29CphFpq@x*q{sU zE$?$qCVz-mgir@Tv3q*Tk|{zla3kI?y$Uc5kEVPc4NcPwL4p0i`}aSM;Livhb>g(3 z{5ikLyhWv~ttI^Q{_yv!i}UiHt-WL%iOZyKA}=_00In2piX`nfgjMlzuW$-e?%c6? z0zv?>&)fUz3+*A~UbhwFYYuzAJsqif5(r%2>~#5?;%aAr)*uC;CGeuj^bIBWXLU_E z?Rd^_bw?c}F7x5WO(Gu&zajeOF<{u@EhK{DQf z?8QZp=Pv=R(BT`T!|tI)ifSBp*lT^m!hZieFm#|UZ3+M)N;6IX$T&A$d%@pprhqZQ zkKM5Td9n*R*n!eAjuUO~8KWLvFu!Wkj%~saNN&JXzWoOzO3@HQc8I~~ z_j|e`D$w@-TZlM|pywv=?<)sj^A^1Ck*`*L0obnLZ-Nggm%SDHY7s@CmW%N4e>v&|3*GbRB}Qy zUz^7$`+p}!*N~mu6Cml*r>aHwpe*k3Fvi~|7Wr^*E59xm9eXMfQAY*@6qF_?p6Bjh z5)D9c7i?2QuXTzeH_Z%)qIfJvw7bAh$~c6#X!h@X9z`CeZ8|R^OusK-E^iSw^y2X=S$1Y64mEJc@I9etf{SP zzct`*5FWDN?WDLUCVf;!M6{4&=@bV{0lOWV{!TvTk?PQX0}dM{2UjWTwHJInKv|ke zp;6MX?~7{7f@R$G5V6UxZKeKr&!JwN8o?7y(PJpHwjyB(tJO5l34yjHz@L{oVB0W` z+SUAcxj3-9cSN-SZZ^09o1A#o*xyUVQVgg*Q4$kg*{)E1n18o*%CYcG>Q0U1b0622 ztLKDzN9+c_m*p5@0ilsX(7O0r~sJiwQJCZ6IE@CW*T z*m?&ZQJApHwr$(CZQHhOp5|%Wwr$(CZQHhc&b`S@zRbM8ppr@|Re5UfwN3#igdCcQ z3|Ys#z#s!2T#6%#hV34dDJhU1A zxndKNjd1a=ym$a(R{E|%>=4Z-LI8FaaE>lsDs zwPJfRlj1!WwrLGa)?APJ!|8-BUD;glgKdKmPjl6FAX1KpwFV}v4%|DR8!Ct047aqh z@UQBC*7#>mfg*t8WAbd0zmf5=A^q>Q>06OlI&2+i%S#X{;{!31TU?i^gpjWflK7Zl z0!k(DU&j?#)1t%Tg|{t>^o=3m*3#&xErR~#XGLj{g+~7L?)S{RtIv%?*Aaen zGSWZosRa6Y9cwUr+l`u{Np|F0DFOaQ6)n9&SQWnrN`Tb3`O2@5`gSmMeuIMZdW*!Ir8sgUZ{4Jl*oejHvO2#e{$W z**n55G+^lpWjL`6s8I@{zFt}l6QRh`*&lvE!-#QU0kv(1qNuWr<{g? 
zmTomw!4ND!y*5qKWWu*5CNlb)1rxQM{lUhuIV&Kq<}!;1(d%*^5X<21#YnD_$a6-6 z(!oTTd|LlTpY>g$WrCO7DLk$^a_RKhNphP&FYQbJMR@7- zUb{25P2k^Gb3XVO#$ehD9BPe;cI5rPOhNnKz2N6>68+1ct;~945#ir=jQ+}Xi$=pt z0Yz2iO@7C_3o^pQ9MuDO?pDJEi(%L|q_x>x@Fx@hgwk8PpFcJV(Z&ZxCQuY{20KDP zwJ$FCb?4+b73+sIXKga)KB0g_c||s}D@~}8dp537PIoLvr=*Qk=n;6j?WTYcm0s>a zmtIXOL?pkUbL>^~Ie+#DnJ9Qz_WK9ikO|+m*xav#74@e3M_+-y{$5nKEUY-bj!Lny zKcGsr_LueunC+hnmO#u`-h|Od#`y3c?dh6~x_wSMIQYuLL-?a8%iFRYEeBKy{jBb( zMx~VQJz`1J|I24C6282X#Uo{NN^Wz%-^&~JEsP>EQN@P#dLTw*5wNOmw~#_tm*@v( zwoFzLjw?8+*9|(rSooup9U&_oO17YR6@$i{$s%doj=EbJI2@82DZ|4XzpdClFzUY<{p%ZA7`DqgQhl*u%D(|4 z{xFON>uApAxYK+>?ke5GF9sV>PVFG38uKtx*R*iP4dUaqZun)MzaU*Hdmb)A7S+c* zDY`pM?@%f#aBfwB?SI_X4hiyFK7IkgQD$13)p@Q`v$0<4mPNb>A9s%dhP z`9tDG7C4shR;vid!Q8##|M^uaD;mR`$5h3Nq@4Q6^^IFfV4L_3u?47()$`J1uI}+b zY>}xPu`PaDw||r8QQ|0>YI2ETo6Sb3eUPaM;rZZSH+DG?LNUAW$*v*?K-Le*}6ykNJD!l-Dg4L;J zoIRP#f7S#pk}FpnTATViX190UFCIpSs-A@_{6AWwu*2W2V$k20L%7hqx+yOtDRQYv zDgz*CqsX)09RL%nQdpTVG%mm^4m?ds3T4rkIX5mw=nxv^2}=R8cY1b3-&Ia+o!Vs@`UL@@Nm##lSf{|CJBSV{@vm`(Q~Hi5XJhZ ziLW{3fN6xq+1j98qnu$INE|hO%vRGfL297uSm!@CkC?#x_k*LYS?D(<5Hu^^F#%9u zC6X{J=t&r=agSTkzG_@3^%onNR2}v%k>GX~dSRcUPYReoM_5t~r^^$q0;0s#jVo<0 z$I9P!71wWY4XUdSjFp*5#iW-4~JD#s_oLuq%q0s5BnEOQvfv$4R|gs)%NK8T z%lCDCh@?B5)fzKDMfR45WFpAcdU5+ll|Tn{ruEHC0k;uH`bngc#)k5%K7Ob^WlqRN ziiR`@#i99CQkCU6T@qPJGt`h!H*otrcOsloT-G&QeRRXThZp?q0y{?~V>uR1yAbaF zw&(Et|5gO?dLj2vgfo5{YnciOKWx%Q{yxsl^X;8dsL}zsxhpq$FK3O3p^6E2PkTv( z@ag-!H(a=~1lzs{`H#=Q>PnzD%47>>II-&RFv^k-@j$e;61R8BwT>%C(1)vwf}T+r zH>_MvCEtUZl*KivzbuDfJ$c@5VIUCYB6vf&sSFLc60FQFrZ(o%w{=zNKp}YU&IT*oV(qDk%e;Nm;${IbP@-aMDY~Ko%^0N_ITWqGhj_QJniF26Kg1=Pzy=r zMfUnrHMKLQ`LrZ4OZe&yZrHrp=rxD6C?FJ8^qi)1;I@|A7uwl%zftR`;xE_MS_nfmk zR|*Le8&xIZz~$nI`-IXo-w+#|lAROLe=;+upC`yt|Lj8as1TDKnQ zcm+>}-I2};FN*Au={8~o*>esx(ge<0s~|9zdSmd9HX8G&ZX>Y zOmXLq_%u>GEdCprTriJ}34b`OPoOhyQ!`mNJRL~AbN-K;#sZ#CSLU>BD0&hh) zOGWA|hXG+x5zpA5WThM211cFnz-k=m#Y9NN@~>tgs)b4Y;PE* z(W@pBv~(ia$gIvUrKpKeUo!cux$+>YB!s+h-B+z)bkk=abm_P|UCw7$zM=*7PPqwl 
z=Y=M}C-|Rohu|2$>JkPeAdYaeSQ)+`^~7N9;y;T&Na3GNW4gP4o?q(l7#h~!!m~MB zoo@Q|+9ze0dPx?zXh8Bv)2f#T_cY}n&2yLiK&Y=Hsqh;8ie^aY(S@C#v^Tp-*r(bBAYIb8>vWId0r~L7l52C2RQ`)>q!)6tfpT4S^mgaAOcuT+=k;NuD_N4 zaL_o8_(3YOpMVMlR>XdvR19|sCxfJ?91KIJ%q;bJ=em)l$IfMyBb>(wjP@FUSY3fto-7k3m|uX`J%}e!-MBuA z!5!0|zy{&ILi6NEaEYXHNFh0*Ey$=NKLMeFC-MY@+@|7=uSA{u$pOcNjaDpV3;( zGgJm3);~8<W4;-Y@C?&0@Jqb=a2{1X@Oy5!ekls)ztHxA!QW8QLm9) zlUKgL3#>w^e;VZF)oXFUAQKarEraqTwY9!KP~2I6pl645P7(XURQT+5CWqZ2*#a_9 zceuq=8cfR?!c9RM@bM7{_gE{}>5!}6^h;Z)z6Hn(XUk~1D1UYi6Ez1FJHic&t#-la zQWs}avmA8Tf_8obEVJN+8kexDZ!^s(@vzdXkykL(71Q0ewYwQTS%Btomi#-5{CH?N z+Ufb+nspB1|3x~nkGW{P$)yt|^MG_{88Hw=ysUoGd}nF{tjxBVB=`V=e_t~RTXr5~vwZ}31LM!1Zp;lJh? z#LcLJfnnUv@*b2C51JlSnBwWTeQgzF5kP1Z<&-DF%szH(KE+qxp^M{1!i7Cfv?GV( zrfrCB24o~_c^KDeA;D0P)ldU&r@i%Nzqy8`NGka8}T%?3?hp2W` z|D}X~`4yM9K5OzbZM$jTX4=~mCKS}m4uALaj3v2GJ$5^64Gwr ztw2nUYLA>49paO+?_Wwogx&R*TIoC-Vw0X9k~18sbAO zlk>i4xitLG9i&UDvd+AEz=UTUwit&PD8w@m+MMpj2m}@7*GB~=d1i^c>3HtZ=8)I| zU5pt-u)OrCTwIbp9>DY>f|9dQtm2vEwwFBxFG+fx1c5(Es+4%w(kmF|QFF!k7=H$% zgB*nI?1w`LG2N`bO7*pK7hB~A6SDdb^uxUbqv3t0QdT@b{hIq$2Dmq&pI-P{-REep z7iZde*wfzWuubB{XO^4jO>U_k)f(U;K|nzDo!s&OnGoW}IykuWVWo;jmGEECK7$n< z(x3d|G`PXG@AYSk+BBC&0o}4fB9gl?$B=GTI_APGyL9O_3ji_iTz!=GO@e4-=w~!o ziewWvSSY>A%{T}tOE}M`p4M-5<^Y{tDMxdW#rbT-Fs8x(fC!`C;rsrFC#{Yk++dtj zJev3@!ues8-p7JZu}cc0j&7j0n;8JZ2H?tb^N*IVrT%ISeK10=zR4XpqT zMt0;gId0WgKOU@A;_a-x*5GqL#@BnL6DsXCbq__$<8AM_tf>pl1J9P$Ix!aZ{AApH zKx1L@9FLuLkgm~r*re=0J5R8C0nH*!wT|Q$w%W1Jc}|MSlQ~#76n=H4?Vn&H$P|WP z)Wwil5n$cl_fKg8u(sa)?ByWicCC#QbfzzIf!ZFOb4V?%L9YUBdyNKuQE9SQPPgie z!_+|NBn+Am{CeOg(yv>yLGN|45a-|9hJUdeF9x;Xb8n+j7eo2Sr@A=)Hs2OMKTwf^L4Ir1?GQ%-U<+tV+QJx%0^CJVu{eS-xzH)cff zyyG5?IXBG)5IVhWgpY@-^vQ6`H^*f%8J3x$qZ_Mj``#)d1cUk9W+_t;(9KyYW7L{zJWrx=Vxe#ZOW!S8^`%5~ zpz1h*Fm$FC?;7(8%N( zYP*{1SF{z%YY!hGVlTKw7fwcgjfEU4=doleO)3+9o+G$s%ffca)px-fCcp46iq#ak z<5*(5{UK<3#cAai$#WEhzVQgwJ-_We_{ZZSf zg@Y8TnCR}HkRr3;cz@GWaBvD!95-mOG_AtOTa8pO6>k&#SA8yQbaFOxc7dUyj*zO} 
zs}?Ya{;rL$Vum`-xY2P*)iyGE>I?B>B-s1;<>Vyhx?H(6b?smsNQn40**>q$c2ky= z4dSVg{}kOKCRd%ijSB;Qy5OEQ)B1dR^47!Q22>3xVxAMe%;$wD_EHq;d{S$vcUJC{ zmL8^fei@k1DGQ@Wyr!iP#nZe&bz}sR7J0{AZ*x>zmWaHlN&i)sOep||qU?%_X!86; z@;^rIGJAjht;#-gGp$*nft7;ki{y|0Nn`!l+|>XQSf^i;ZgH)Fb>OS3dyCNiSs@Ks z^vI7}AKHFG6RV67%>^@|_~^c`oHrJ)Ce2?}ij5T&M*u48D)SA|kG^{59OHsr2$jl> zZKzqoNp${p$oqTWy|sYPo(rXG$T;<|t4Ux9d)gIQq|xm=NT&onTbIK6z`$&_@?|+F6iNnx4 zSS6fvBxZ@Wxp<)J%98G)-JB7K74}sOE(vmUISM=G|3ll&(OY(7OpT0L>qziF4d@xZ zye*LQgKzHj5Ex=?h@^@0b%lD-j|ND`XKH=q#0t7qqX`gp7{JM7s9L>q^H#sKZeRvT zkJ?L(MB%SLQD!A&wB9a8tq@oN^0^D6{J8NtS*_Bztt|P_MU2|uZ-4VoVe`C%lc-aq zY_&S8W-Q%wO9mGo`jEFZ=7`YBa2-K}NSRHE6qK&2{IvTL=Y8fhERNOsqan=Bea2qB z?NzoqPNDF<{C#)H+LaKuOULC!jD6NOLsOS+Bz6QSlhEb*HwtpZ(I+s$D_L>J^1u)t zGJ4&ymq!u;L~~uL^JsmR7bRXN<08+pmW^5J?Yj4E%fF_6ESV*9woo6~4NPGE!-dMe z%OVs{ht?Y?TU^gft{kmc)G_Z*<&VlFTlj*08i)%F`2{vIqXBC2(hf_@E+2{a1^Vkl z+1Wv(bORjx$QIXKT~N2o-^3d89|RJu$!_@Oy)y@^KP6dJKZz*5i*OGW8XK(5BFM9F zqgg1wTHmr4hsDg$W5qs{HS)W%-E0ERTF`Wg^{swz^$k=i4bm*ZM_HlYYj%5vA24?A zkJ9psrpkZR_em;|Mbnx+R}dB8=y8UCxjH4GdiFj=;k2;emmKo&*}6vSbdhS_ zPuOyEvz^C(;hP;x@m1XlC$8Dgq5(bX!mGTY&ec`3PiV|QH0ZLiLYWXm9lgF~axX(1 zIyRI>hKJ$e*=MXd`MUeY9-_fcg;=IhC3w0zzLCn0fzz5vz{kDqf_ypGRQrVB|d8a10<-L^;Q{GD)WM z=zmAw8#o{DT7~cx&!lX~xWw$#$_Vk-70%A%xy}f@OE!?uNrY(>!3cRIdto2z78T#3 z+}+eT_3jfk*A^8L%j{6V;5~LQh+%ixs`f&V^*P6R$%gxh>__lCYx^R)xak~$fii?=YV(+mT&msVt`D7_3E8?I+^ z;cBXhpm|)A8?h+;l1ei;G_cuohBENPAx7WoE9T_jCe4+Tc?~5Ff2VPzB@#>2A4j8` ziw9f$0$JJZ&29>*iK>nnGmkSBI!Kd~Tut>cH;XM=Q0|H#KzhDz!KojHEFm!_(rB)C z$5*^$b^KU-BKGS#pk`wNMBj{y$1R$mMQLdOZp7%pu@R*uJD26THv!8RggRF7=nhpe z#mcka!-5{)7$AT3u}WpawcoRU#$-H+{mn;SuZW zjN=gzM z;R=H$?Fw{34m?(0e{wA9G(YVGH0)5lwPz0D27i=(vzxshm3AmTV=n;y%|<#BS&I`9 zO$?L8ZlB^iu=D2aUri?Fa??v*c^tQJU=h3m08ep!dU-QsZ^ur2?T5*>FPttjaahxb z2&E#I@~So`nS$YJkn}2iHQflJE08EmxxS&{6hmH;G8~4W`K&6nK?HV-w?Kgv8GC?K zC`n8gN)}dMsUQiYuBid1Ll5)PQ3W)c1{W>p2oOS-Nd?Uda34os((Rw3bhNg@2uc`D zjMH$!AFsApfzxV4aVU>j3xsue0EQ=sw_q*>wy6F&@x+{MbtBdSfm0F%rldjFpLf;Q 
zeO|B8N)X+WK4I_Hk4=W;#+iiVN5QvFpugPpm!xl|A$k@9u@?6?BP<1F?06)C1exB) zIEGjDToN6aGl2fl`2pR7b`3PSZ}Ju_o+~OJkD_pQLG=7)_b7zfTUk|ep^U`e?nhEf zzDo!ZKvv8gCa~Ukszzb@2wXp+ijkSBc#+*@MA-ADD2De28*6#)Tj?II|iR*~{pJ!~<%tOoQxrwmoc%$J z7J5J#DfTLiaplr)!2I^VFyami(1fog!Mbew6M$7y0j2}D9&a^%WM-OiR-EQZaJP%U z%RSa0fpGB(gaI=t6Cf2^BdiEWJSArYAt6Ahrq2b1O!};(>VS|STcL|>{^9wO>a1Ea zLOgMRi5_S5v*Hg(=W3P>!Mp86O^e8cuJ`w)c?SaaHV{*`#8sggog2KHcrl9Fo!r!6SVlA5;BdksZ`L z{-RMfy-!apm28PJ+)AlQfhuO!jbXy)e?&zPHX$`qhgX~(uAEHk_ZI7|9vptZG6?B< zP_GAZ$oOF&9M`oH*gb?StwJ6FrWfU=i4F5H2Zj4w)HMYz5OE5nHyolBn)qQsKI9iR zAd)_Gf|L@r)QxWLS6B--r>X(~H8?D~l~G4)eR`bZadnUfwKt=E8vSA8rTQvKo`6E$ z*I@@WzM$=Y-a@#9UCwFdPRe7kuzmQ*GgM`|IL-Zz*?cTFrN__tSng2Jn#S`oo^0W zePsjW1ROWVcYBmFel#3b&l%o>(egWZ553@HTZ6zgQ3#xjN~a9oOkeM&TsIm!Pu=ih zV^6;{aRpMK_K%~$F21XLeB$-DvjP~Ie=1M<2Hl3GRB5FDr&ujVs#blUxMOf@C!kiT zOFa(#5T7{0&G%J8cim++rV+ z-rY#57mk#={IkvGH~n=45aHct2lwTkWCV5=O)k53b4?6QzPD@gv}&aR@TX|Y1|u4w zAaW?g^~lZJ6cp!CQH{F{xT{f_#Re+z%Z6I`lL!jJQex_K%ofZjuoFsZTV6%QGwf>2~#y#7}?+8Txn`ANpt8%kU06q2Wync8tZu;9}pz zzzSk@^xB}>UR}RvCU68T9T+BWxiS8V0=AqSyVuWj0YZ}de4<$4&Qlbe8R>;YvvLnz zXP61ZT@LtU<;!WGcBKGbBEyPIekV%H#x@jg(4<4r4zk_Ll4B;~4+00&_xoUSs|q*WeQP z*n}y2dx*e(4m&W5Uo^SThmS@ibR|3xLdz3qnO+S$GxFw?NbQk6fE|Ns3R>Eb*n~FG z%*mX)oYiLPggWRcyTw7?=BFsVVSbrQr+Au7*DN>srZ5;-RHyx`;@(jn$VDUPaePF{ zu{e?qK0W}lQ9Qs_tq#2AaloC&eN(}OD^x=vbOPip8agnQXjNV*yG@cef|wp;Lb>og zg6tK)NC&`8uf|xqCAG*UlW~L1hlval|I_#Dq=gIW*K`e~;z*c}fpplyd?Yf3XDc~_ zC-2j8QkjX@6ydrA4Ua8+eie1PY~RL z9gS3e+-q@!3_?D{SkG(KQ7&Kl2Df9Ky+UgghM>L#ALzR$1XaLf4?e)_6QL1nmCEhy zo}8T(_n=`Yr5hk`fC5Iyj^hA(gnD!#3W#0a;Ts?r#cbhe}czAWkE%mu0oNIDjGGjQ8Lk`sx@C0l$GmsGqg5(c{)B?Dg&3T?`_-9VpUaKS?4Jf6n9>bvmZ9Ye^y| z$_~gzeY+WMREFRvbvNptHOfzj z!9B=eQ4499i;y7sOf;jn5XS?dzb4JZ%@v7?jMFwvw)!AkvSS4 zvdqz4*~mWjhnpQ$?1Ei-ZC8eHV=m=d=gZCHk^@jS;@fvFXuG=(SMHDYO+wPes0K3B;G83nEBo z%EcR_95K<5`juJCOT&L?IW8qlQU1iVjVEhw@gVrJ(p2try%*5uN`v9{XKr3i@EXmA zs8Qg!S;T!ferl!hReth}MmCZQFcwrUEETV{}AE@z^>Z;~g1ZUfLOJX(d{2|#}g z7Be5o3r@(bLXDdwJJr$cC=oY)m7b$@L+8 
zJvrikAt{!2*Lt}~BFAsR#n>mon?Sp4y@kcqNM8mU7`g>2m)b?Mq^hBG786^NSym2y zK)VKq2w6yZK)!CW8kiNZQY3w6gTHIyWj7mQA6q#v{}dFWsL+C7`KtI4%pb`T|8@lt z8HLssdqJ22cMNV;x?b@{K17a8Mtuv;snkVg%wx%u&af%v;W#iGnp+_cG8xlsjVk;Y z_*c%o$o6OMv-wRC89Hz0?CzB#e~BWz$6ZC2J**_nOe(labf0cQ+B3f<)d`Zv;CHzU zWu87%I?M}9+C(G&T|S0@{PIDve5q@9`GQ<4hVAkmD+JX$a|R_xB`^6bO$OAmKreO` zIw6awZCA#K&B_dGZO+g2H#%MumG(fJ(g5P?`|2^35QHQK#Sc}-_~^Gd+RF~6Iz`ke zU&YWj_Sj0BZ2`kWr?#+5pj{v2rm~whB6(1*alN+qnxysP_&YtjfJ~nkr|h1f6+JHZH)ikB*_wE=0KXs^m;~;jAPb|$B1Tl6bR%u-Y$eNYA8Xro zDl%-;Uu!+;!x>`}j9LYL-Rkiz>u!KluIOaNelGF&JM8Jo`f352KVhnj>8*lZI5?X8wjB zTz4A*)uKpeAutXaOV=v=A5KK;3`TVTv+V2$-ZXuY9Rr86qgrzjWrW2kpYRU<8#^&- z4C~#^qz|FbwfMqo?%HOC%?vBfP19qGZLWxIP)Jy+Rr_FeKALM0du>GqjAREC&rHiH zXA*b*EJp2&3H}wbDe(pIDZ~GBbK`PBSXJd@m1FXtdXt3TA|yv=?_6K)S#MRhGlC0k ziRT3wLHz_%#Q$+bcmK{{cV1)hQ?p}J(XGU;6XIcU(GRO82@QfEBJbl&Pt5(Y83qOJ z3`I5l>od%LVAtNA-oi&0k;IcUJcmd#U@b}!a+ooz2FcmeN-Ha`OzC#Rg0wA3^P#UG^x zCEzpv(_7@LS>lH7*_KcJfiQ1@ELkIpTC~4&u4M84OV;300(P|BX6NM+y|%Y#$$Z(8 zvRJXr55~jBJ%T{O^Mbl(GN-5tZkBBj-nIG?=~_y#W0yWd5gn|8!f|UoCfk4vk>(u(|8e1kJ&!0Om?oycnG$~HnkxI5vP64 zS(KL^6@{~521hs2l`#5;%;H-_EP=V@TsMa?y%wlc157g_f19i7RfrDv#rn?s!F0X_=ZP}+iXb^p2t0xQF9yXAHOqUtn8Y(Y2 zDxm(aKsh;;vP!6Vm!ww>di2CU&f-hJOWm8JaaaT>gZa1;D4|cO+wTe~q(5}%u##8{ zaQyOYN2A|U;R|Z5$6DI>R55~VUycJ@>Plz!g|f$e=ms{;xiD7&Jz|Sl?hyq6 zlQ0Z)x>klq*t~MaD9AA@o)-AnXNg+2Cp^N;85ew@iVw@CYeY=~-!PuMK@5SdGZBB?c+kvwaE#*aaq^rCCdyhJ-hr(h<9EGU2&0bwdOb<%ya1Aj>P0B_ z$y$Rs-7skp{FW-=117ruw3GuybR@qntnnn zKT!v`{Js(Np$y?Xub&g9u1*e@OIX~%-3O`Zxl?qC=V-Ow=GCxcq-HzDUJ$#JgTFU& zATolT{5g(pH(N0k=;oW%DE}1?Y#V9_QDWf?3<$*{NvNW`p51J|EMfaLkY-|p-B6Iw zBA1>1#=J_k7xpaM*S#coq1T-M87zGUUr*$S7O?V+ltb#xwj>QGdwNUh|0Xg3H>W80_J$MGN1GB>O0^~MJ3 zU;d(`=g$ug3B%O86R}<2FL&px45) z>J)(}tnPcvBO`QK_nJARWjvM~NDWg4D$_Xn+?y*rH^k*Me-q%2KaxsW3Sp~?f-WT> zW#ECya16o(oK&Uf-0*6wPyt#bSc`!kp@eidcZ?#ARU}err0!8(8UE1d^|ey~w=X%) zYf6oyHksR7c6WojBH7KL5-|fEFjio!4C7`mE`nFSb9S=_PQRmmqWH<(9~Nk$qf5?x z>@NaCcP7E-47y+CRA~+4PO4)DIM^d#6Ix3>DseiqN9T|np6XuvD6p8rX9{4+rE3%G 
zP6p*>FvF{P`u*SpfQw?Ridi?bVpZ zJ}5q|eTfIGzpl0Ud1@^o@MQejl*dTk-`jb@sq0pG0F%V!Jl;~t{D~Zs$)Yq22H_tW zRadvmI~u#Y7egV&BG+KXs|>H(<7`YT`=b?Xzn-l3iy^ZMD>Z#=cY0wL4T61Qcv_?q zp#sF}xO-J%2c|`S`Fw4u zL!?&|d_1GcEC01hrgeMX!&c;EBd!!LnP&zDF4k8{t0#oz1M#K_e#Tmrq`#7o1rS=5~D6- z6oUzXlhCK+#pzm~<-UIb+8_g!ZfCZ@!Zq{KcLl{xxt5yX|EQvwWY>Hk}VMS_!ED4iKMwP;qM2W z{v*cfxNlr$B)goX%QcDvdNCafwsXi z8vCm%t%|Q#g}@he!FoWSlqvj&)|>4!h8q2cZ!EGzIFyg=Ko_M{I>D^Z@qr%VCKe5G zSw*;{ul;~wMpHpPtmWni9{CAR3!V1PH;j3K(UpH{JPN=f9e(%!BAN(uQF{x8DE~Ym z>b{FcIxLjsJngsWCM8!{>J7=%UUS-86!DPtA_oAS&<6Q|oA7(oi=bkS3TQrBa>Vz$ zj4A5nHWaflq3*{9dRfOO$b=nf%gkfF}#Gh0|&oS-k(JjPd3Km zMzQeBVa0h&rKeu*jzBwP3oNU6p(s# zv`;|s;g}{z$8h`0PAe@8{aavQHabmC8tSwvzO&F0P2_U4aR@=VoE^2Q2r<1p1Z8gu zVeNNexWwTh&jcU>{>DZEpMa+cj{l04@S*cMG+*-Ldm*sbdd3(S7^pnLZ`>D0= z4nSCQth)3Ix%FQak30OtTkmTl*#KGUf%bVL;k){q+DlaZ|nyEsOa zGai2+w}`X_ndsK$%GGU+-j-mblRognc2B-!YgG$m66fM3W0hCqM3UDlo-MtusSaJ7 z?Tk^Cx)~FPWx38kG$~jJPV}Ioiyt6{AWQTX_r!O>7HhkCz1jX>=1xV*D&Fr9Y2GpWHmyC?wQkX?;Wp86US+;rIr>RGDB#%v!YVV{om# z<_OsM3h}F=9hA}R>o%G69rv-Efw0t+J+<0LRD=}PnNV&(!#COa zW!qG0(%qYGIj?m3ndfEb#AJ}4hj4iG{p{M3WIcokXlF7SO|k$J%?uVNjg~$Q+XPEd zplkxILy?FIKNARBv&Q*`hrR)MnejT3<6xWe-z%Dk*bdYI5g=i;@yLCSosZI?Say;k^XQnzSWs4etReY3k!)2hWkRrWh4F@VhA^BnB z!&o8I8mfY->$@=u@0fPmCFb0!Bz1IypH;YVVw=NfOh<03Tg-#WIrqrK=MhV?YO0~k z0H9z=;wzEAn!zjoeuEGW_gQCaG7w^Dq69Hh@%`o`Dofb`7`@_Nx~ZlL(4YOGMq0&O zj1i?V;24Esrzz1`S@F0Ik60NsO>;*~Q?C`+c;D7ar_H=~Dibs;zF8*|$UyHI4x1$QlGtF?mL8B^=8+ z3^c{e?AH;+!$MZX$uYJc&Io}TDggdmY(pTRiir&U!41p3DF7v5ngN9t_P@{i{(HUr zzguvD_Gl^vl8@r(&sA5LcBufMtYE^^9c2;XnyxJUYSn#6ou z?aM?b`{lR^(|q*?wAZj|TVq`l_3HNUW8Pz^Y<;9D`%^ z;TFgHl>fF{!$=6)c}JZ{1j{zY3FDE*%Uu^q7(_LE(Uoc%u8Se0I0fH^X{1@_p8W;g z^8asj{mB=p6k78T!IQoHmGT)}P!VuM@N6XFcm{O0M?-WaEZ0SgUK5>$FYe?Lwnrav6%W zhz$m8x-7Zq!_YK;L}zoC+Veh39)X2OY3sl*v8VDZN#x`M)q$V7>t4t@^c3bYJORZt`oEe}pjG>Z zgTS@D*r5=8*VYuaB*&CkC;+!_-t+AyQkB|@c*`oWoy7Fv7b^mX^J76ybW5V%t>c-J z_bT1mB@^?wy-F(RBVQ7dr#Z|#g?`5vZ8Z<9Gkd-mqz%pxXWQyX0*`ybaB%KhWlJvl1T{|*jsKH^!va=}oiigi=a 
zjJ-*tH7Z3=4M#_)w1_OuAcSLYZFTN|p4JlHHt_GaEmp_=#zyDy>_jL2J%yD1NaZhw zikiWlNZ(?|1)cg`O_SLiN(eyPaMUW3f)fjL&vYw=o)QK>xHZQ?p}+n= zt1j@7twqVhj3k1$uukijm($#>*s{zM$xyl-)>n29W9TK>p==#?W!E~c!UB8>cy6vV zP~b!ks=1<-^EbTed@YC8O2Y+9ehrz!pW0YyAGhp>m(~v`K2>G;q%_W}w4Q7bKfAu) zd7b~7eig8@+1i)rpT{(DQX1@p@DWjb%8!^odAKH^oEQ~KL$Vip&vN#n_!^%24*()q z;D?7jqk!sGjk4cE#=P|5*u2E@258g(>A`~Em-1O{b|O5_Fn?QB@@8gDytYt zNqE1|fYuSI>RBXQip4_*K4u}7Q+_}9V&Fr=@c?Ek7XLQ!Uxn-UFQF{gWkJ@m+56Ze5_QGK=a7!#x$5hfmbs47ca6q!fwlF3+XuXM;D-BiwHM&LSuUw8m{? z^`v39{?dJrf*$W9g>RVRWlnBvh8yd(2GJ@1%GjTBbrX59z@U-awE=5}37%X7^>`|9cqna>%Z*9iFo%-Bf*mRE{f$Q3 zw?qSX<(--A!_EJ??o#gSAiG|<|cg3az#UWA`ynYnZ&FW`9K-?rpyLf>12Q2r|D?Ka>+!yBq@3dRXA2)1<{RkrcoW@KWK-?{nYLzg7`s+Ot=C~jy(w}vnWP}7 zr3yRsJyiK%gGFS(kz>H6vBV^sk8(d_%q0b3V!Myg$qU8eL_ z#YpnT(DYK*{mmkKU99_oYb*K3^y@4ZTav)+ zG%~54PIJS^QWFdix}GMiQHJ#4a%08}VSuiDFS{qK0{G}*0N}$c`%!zcY%o^QYfYRo zzO{RxCuyBus|ze@OQP5nCC~=yRMP^zDR|_0DEux?m~|hV22~~9HK{S5@bL2^e0Xo? zoTbC4%1)ehZxKw!*00b^=$d}&w*Cp=BiRc#zqq9-{Ll_-Sd0(#2`i8VMed|JO!Cf27;Mz?E!1lVEF>u_^{=orel{9{2DlH9@o zNYolQU|IV1p6eP}`HEFL?-5#K8pWSyS#S=&1AcFGQI%NvbNIp^amyT8MOuv#b@vaW ze2h3t#ADNy*WwlY2>IOcdz>=6*9@(8!f6B3E{?u=498pS@2F>;B_;N}D-|-p7m3L%Z=rY8Rfj%sqV#&e_9IACrEC?swei9_)dkIxVY@l-_QiCixvTb<2) zW?-9jj$5woqGq@M3hK#53JUk0b@C_L;N}3Sc?Ir={N9;K2yoUXB$sohD6XTDudCI@ zPcHgf73(Y4pZ7;Tqh3&&GOw_Nf7O$IwVgYYG??->JV&O58xp0gHt`0)R6Lf7+a+AO zLvK@8BGKbQ!13I8fB4x$a0&agQ{@ZYS&K3fnVb%m>vuIfY_WxS=7c=mx;WD`kuhu4 z;HmIHX1tBByLywaAy~h<5DTEnHn0pddSOp(H9Dy4kiAjT?k4B1@bcJ=x$_)z2?w)M z3F>>DHfRK30NLQPVn?~dPX>k!m`euq(+qJYki3NI~M?E3tr8<^D z-I}mYowUPMMwzJLJ59OTp134_u!bMe6ZLs=$0 zj~h}WgmUC2wuh8p+O<-M-x7)8oE^sLq@pgEZN`K@)coO&&x#<#a*&IwCit;J}vWH%;6Qcgnh(iksWy>_4 z!0-kpuP_$F?jE;}*?wpc7qc_6Oq#B@oN&RyMsu{g;9OgK^;xF^tnI8^tIrV)a}s_% zG(VU9);`u(9wl#@h07rHit2>YnbpHR2C3^ZXQ_nW?K&*wOIHyuJnnrs@PFFppRSX4 zS@?C?D-cjwtwYSrs{Zxlsu~b^uAoSF5<3>U09!`wlGDv^A7lZRVNcG>@_#I?qtvF_khs zk2dskQI5pS#SiNsirG)Bo~5q z;|4TI5i!2cOuW=qeB>l=UcczxI*%eY%To>$N5O2lg{==CP#@7NNqFp2rL)`dxmuK@ 
zz5N8SV#^Z_gL!IQz3{bUPt#(Y!GzGG6?K85h7qxw^cJm%l?u3JDNf~nhm2_sbih&f z)|sw`0-Bsgl(R@sEr#rZ-VR)dNrnCQ*5cOO8gFS%Vabq6VHz1)+r6);5Qcj9W9rH5 z>vWo`pEx1{+BP8Qw&I1pgolw|hABSaDUox8(n2Y6Kx)4})hmKtaYXLLbeq6I05km2 zQ7*Q!GZDT^h&>kI^dp1~Y4bqeUo+G=QMWWQrOk-bCWb;xrrp{F=(P(WM;)dyw&CLjIF|>2Qe3FzVlQNhueFW zuYjUSZrDh|C->!LkfQ^W-(C@g?HC`HjMC}6ovy^_$xYQKa=h-MSv(GsO|==DEmj05 z1DOqR_%UQ4EE#TExkiiN9rLtGkG|SD#pI6(uy3R;8TkZ&SVG$=63FF!{6)vERPpSf z?O!y`)p#&_!2C%WIKCwO3Gs;di^X@Ejq6u1bf$U%lyVjg z9EF@H9@oZ3LDoCPswrJ)(car{A((ujKmYE?F1d1`L(ma@jca?j#8}x1SVTwKDGM_G zajaKkShH%30iT$^5zhqdk`eeZ0}JX9+CIhkg|R`I6>y%PQdZfKA3LTOt{mdNuAl&$ zZFErj0kxAp@MpqL0h!U!mJL+^$PajAw z=b9vZE+a|CFSJMbIN}Z2(-r!_x%i?k;7 zWcoLe`_0x90bMFHtPd$=VDsXh><6xiKU504!Cz>@EvoGluNVZGNyl&-#k`jA$w=xR zBPUz2@v_peNHxkI+Bo&4s+-Iu+F43~8tWWl_$-Mt3*>WRW)j({A%54C>QW6lZ@nw- z4EdJdb}0wVK>1SdxZkORU0R6%7+b}d; z$qN{!Q31P2D7fac-7BZZ2FUioKbr?!hdyaW4rvQfd#qeGT65U2OoI6ZXitTc!yVhp zzZm}{X2~JkjChP}uMwONIkg9COz1V0LfS`nj{dM#4j_gDsMo&;dGz6TuSeV|)ru$x zl~gW5;tKg~r!>RxNG5wwXJMOq=#P}2$u$Oa!(U+ZQTaRvF?_l05aA-7f93Ler0x}` ze@c3#^F|A<5gCoZuxZ4;5w7?BRl=eAI~jx!8wbYYi`xI|K8X5{dE>T0HH|W=rHfgv zK-?SF8wHM|PF>6TD|YD!7S?hWrcO}_`c#rOo*PjIq6?`ww=`&cp8}~Qzr7z~XJkgC z*lA^cE17wI0pEvUY~BN$Mt;(2t|sG&bh5}SEHtt#T{DT6aQ?=koUik`P2B=?%yEGz zxD>0W(Qwg}_erK}?l^S|%#qRd1Cs@vrZJLHkWP!q-)^#_c!R;Eotn|7l)gnfY&8Pd zY0#I7kHlU;mR*)thbs&ur&L)&}astQrRJGH$Ip6z`FdR()#KAiVmM_ACHMk#ynUNBl6h(tHjVRX22IYVDwWcdTi&1i4?H<`|Ro~5s6ZwTl zZ>F&XJ6`-sM_}OJ#kU=I=B>1~EvRG(gx$vpi*6N_{?h9pkGw)KOD{$WKsaV(njAL3 zyzHqqTQ2y&BCu1tv@>`7dcR_j7>Of{M}eE4CLZ4FV^aLHzYd|&h5C##811uyx27ie zHmB)*5bu#yE-3;7T2&5(h5mZE(}b;?fb^?Cm>>WQ#G|#JdJby!XcW zAj}5@Tjs>=$*aDTW3vM@*a=0Kq;HSt+TOm8Xin$i9DwdXT@Aqh{O*@RFZw)!Mv@Up zMZoIRHCaG5Vb;=h`fAcslK&YI7d@O6F8|b{b|Kzr_+syppyRdDd1HAd!rXhZ&NP!n zS8T`^H+I3Yqz5_AxD<%L@u-4{!bXUv2}UHs(l|MD&NVBL(q^w&mSA31>#Tm2p%)lr z@Q@qFXS#&ZcmM($$ir>D@d|c)Ym2~pY)@h2MGQ%^hQ*LM7QWXYSiC2ImIMfr5Dcr zcC?jCQoD66YpZ)bLc+9eG{e^pY#X{?JN>tO0lzmBDvt>%{)X9jn2A`1z?kuz5Y{Qw 
z8wG-{6N0(WRugUW(ei2adQNv859-ow$jH;*#4Q#P%k~U*w}jD*!udMO9pueNxlJKY z`3gOZz_5I|tMpFf35CNTr)Labo3*k(=Rp@KG0L(%cX;gn$ed8pILNb62tfTl^lQza z(Ht#7)BH|EDPgF&trV-&))r>S|L_MKpF^7a2e<7m;W!+huv$Jo1NDpLac~(i@oeoO z2nO6xE;>*qe2XYFOmpoq<$yZ3+uQY<#%MfE#NVG>c((zQz0n2bX+}x#((q0Q^ zj$%=P*OLJL5R@58{Hm)j0xEzYw4?5jAm^d`2ZWl17AqIm_LM<&P>=b34}nn*_YRAg z!G+K9q`)Jm98r%UWWC28+g3QwHJTHcGu30tSJi$ZLHxZ5N)>2i@`=`0ys=a)!VAAk z82D|rmx~_e{l?4KYT)mRnI_VjQ$^*Wk&q( ze6q4Ky_>nS{jtvPr%A10s=FV(YF^dY=M=QQozXujQ=rZ;QW+Z5dJws5kt2L}IrU-d z8M`t%To*ok7u@#4_YE=ptFB-=LCPnj>d}u2tXXsRE1bw{12t~=*v?m{$EltE2&HGU z6~^0Jj~VE}9-I$T{8oVWk)shL>Wn7N+5ixGAq^STrizG-E>-8Oo3OwTj zll_xYw)&JEASCw1S9c(m<5_x%*&p-B5k_VY+Ou(Q~U(!|u6Y z{7z)6-X`>s(ToT=`0M>)0^MMxJ2IrvI0++>s-~0&$4)W0>_{-w(<@sn@No!FVG)9% zlzdWD+!B-Fa~iw0HwYi5)GSu>e2WeZrsSYkL;PA`))k#Y7{>u;=Q> zxZ{`(N{E2UJ%5W*oHfe%iaDx|*M5$r1*p#N-KDzGxtK9$$KT*_Nl!g-Gk!KU7KVBL zt`EvGB%bETW3L}Q7`;5q1Oi12F$#C@Or#Pukq0$z!5%0RFi!k)6u>{lNZecvJ&PRM zo(qb!f;7JOfhCEl_03k0JHzk~o`J!d5T@dWvUQZWqx@DyI_3k8G!^}J&k>2Os1qQS z&F`SIq97iI21sW{6Va`}GZ@i$H-6%`BYWMY@P0khSG~?io<|F#dj}t1L*pd?H|Epb zP(=T%XnP9_3{g!)kQ8GP-drw?f9Udx>nuY$EgT%qEr0N_$J<)Qm!%KEF1_Wh1vxIO zKW}U!K2G5o|lFxQR~++pp1x_ zEA-G?>6-)Wq0i^myot!$+YlhrE?�$oc-szny7_9^tvV)$ig;FnknQ*vFIgL@hy1;id?!z=z*4D#Ca ztSK3F;Z`02ofNRWq?+`1DQ>N3TCo=bl0DU`2fMvWSKODIsUNT1zj5BIT!;H`;+WZ) znTkaS0?wGMZSku&E%V?lY1c>q;fOA?0y)6UC^Sd?@`MNI*i*RFJI4#BKh*;V0qK2J z{=`j6Kn2JCj26iI#f6iR;Qfmr9se8)hrA3nI~6-h~-| zz#zCl?^cse@(71T<4jkUp=g}o+8^KtvVTcAVWq%3BMupYr(M7KU&m(bydJe~!{UR&pCOC9aqn=7Ko`0E0ds0sUkfBonkgq!%(AtJ zPw#>Jt2L^lF?;0qIX{0=1B3IbBhl|v83S!2av3my3$KE>9Fakq61mP#g6uSG)T;Sz zT{IvPJ%0FEEtQS1ka<;kCM3Z(EWOjIQgV3i2s(b4IEF3_L=5hwgNtWB zcra=EEy7cfjS_0K5bP)WPnVhq{kdDs4L~X*Lon|K!eoK3P5QJ?;_60QD9$$bq|8jl z?9%VN#5G#Y)7o@P^i*GWEGlhhkl}GY_Y|%bwE9HDZK63YPXxsKg4oLONuoGU_|#l6 zI)YNK+=XN3gG-o+k^qWY=b1VZA7+KhA4Gpf6c|wr>5S^Y@>M{!#sjkPG`Z#m(T1Er z=Qs~%l?F{O^nk8v*%dV8QB&|nyU}X>Ui+zw%^*q5~f-Z71%7rfGrm>5ygp-2tbeMvh^ z(sq9-K0{?#^G~E4#`eAhoU6ECKW8_+Pz~r}h3_-DFq3zVsc5?=#4EN4OSJXNvNw@< 
zv3HqR4}V^^oGq57SN48g@86*J{^EelpWgyWfv6(3j)C<@U0E3@d3V!2aumB1j!S`e z=VhjDlG*|Ptm(QlsSkyZN%O!+b>SZd&Jl#W6}9rSo#T#GO|$_=tbv$Ijw_*6zhOmx z5K@&MPmW^%bfH@I2OCb(obY5kLt9Wa4&%i zD){=e5s3^NQWo8VEog<3&<$2zFP=JbnMu1(4z$tucul${POO?kJTz8V_zGXD-cEOe zVR*{r2=05jvgFy2o%bH`z6;0kZE$Q<4-p01a3;1(iSh4%Y5g!tBr9yUlAPp4%hFlqwTfA?rUKOI^K)kNPZ+Wx*T<_oD;w4S zDkVY$h@urc5)mN%BGA#d-Z2P5r{ucP0I znMJdXb@m3d6(hfZBd$?Z0NmqJT}7iU`8XUw=Zh@yLo_VUw>LWuUgClfZ?TAa!?s^w zBdtkOK3p8%Ja$9eygn|j`cU!bpDd5U&=?efYDzdRYodU5PfGE*qQK|$YKC4p3Jf^@*agGPF<`RJUHDZVPlx?Q#WgZ zT5Eg%g^4F~5nFXhTF3^4U6?z7mC8=pS>?x2DJV2OsG5)M5q>~B4Lz?|!g+?Yah53A zTAxG(rl$O5c4Tn!qODTRZq3HFg-5W^-N4@U=q2Pc?DB`^&}#lnh*0@ZUZbUpa@xvp zd-7p@H=8Ve;yqO|D_P`hVjH&NBxx!ZZ~0d-Io^j9KvV;~^r-7Ky>=`@uvi*lHJ%8C zpc+f{uZ_1)vERX6)t$T)Qwa>!Zv|?hVDJdTUVl^(fmvFri5}dn#%g|!MEX!3 z7++-8*R=gzR{)6rQYu(X6yNO25}g{)z|h8f6#EJldZy! zbB=pi=XuMIge^dh+(Crg%|}{JXkS9Yel#p)#RyADP_;xpB2v+@H&{A{ShL&xF7||C zr;Zz&cVw3ha9(4CN!m+fSsp}8kvS7S@u{yzIs zd&3>mRj=9c8Pz99XhfIycSHS3w+?n<#K+lKatchMUuE=cgF2jpaDCG42R_h&*;_W~D5odBdAq0Eg}HjaA>cP8xmcCkzbj zrg+-yhTJudQVYJ>ZRhqC{hX!HQRCyuzIpI#bk1iABh9_smMRQ7E0&t+bg=pjLEu?Y zO#=|JZO7eCjOt+0m6S^?MTK6^97g&3e3;mhbAvnztkvO+n+`k+_Y^SyO*pF&4`WwYmWvPvuAfemI9G$1b2&;T# zJE{=zOBJP7<_W9)hoxa92wzA+h^?Q;;H#9uXnH=ww^M`JY!t?*HcS*;*QSOxuu_3} zCYZg60o|l#nOgGcIS2wCNoxVD-#eju-KS>=Ng;isNHhUDv!TQ_)2aaWl2d-a0Lhjd zs=K2cXTZRpEivqw6to|jFC+CoShmDL-iFmpd>Qo-1eeKE}ax&2|UP!KRJ^T%7E=E5V7QY$H#tJ@i%$b&tq(%QgiuSbWG*hREa-@$L z?O*x(a4?(5b+q_qARNs%9EZ)9!pm3$#&}SGicXcwTrxxOQM@~-20(`s$`25B*jJ@L zS8H81A2gnK*!$bRBy;sM#SvBF39##tBI0$7XKs+9L{X!GJobsM`MkribIdue0}SY^x{gaLR8YML&Jv4g(!}Y{?P$ zmo0SE&84o0=r)GqOEsV&FtZwXeXp)Wc36pPf(5pM?SQ)+I)Clj!fNAMZ<|2VwE|kL z>yCwy&A(Uw7o9c(LIRrVVF2(Xi0~YuO^M@aM#es$O&M}B0*K?GGDAXeE!25et-QRY zBm0TA4aLaUKoP#$Jn?m>G!8kkZwribnWe}L=wL1z1HohE9no)H9uHS%QgX9xnjEg7 zH|W^@sTfSjm4F_HpBB(+BjCWNp0h?ecguq#!Q>J%ZM!$&N2^1`uXi^JVoV+ft~yIx zr9f7b7DOs6*QjKB;aq7b`aBZUS!|f83C+M&c1GO|@wc&}D@0R2g^_L^vc#L*?c8;M zao@y1$MN|u_9pL@O8zuwS%|YIQJ|cP3Cfx2!oQX#?wZ4Cxl5zShqFq~6^9dFJyGq} 
zrR1GOo=E!FMpWPe=q>*y((n-&hNbzx@kkdZTDTW*sg->1PD~mY^RuId-#QI&pvTLRG+14A+sYLB+6@HUD2>Bj3lub2n)-s?+p5;jiv~y zu;xlq-q-bU`24|Df`QKddwA8P8zOI_X_cW$u2UhBjuEWgAdD)6+IVlyb0!b5e4sE2 z$8@+*op`H?sg@oUvCYg&|^b7T2O?sEVH=MF1qQ)?Ux6Pgmrg=YLr zdMg)sm{VYo?Z?h-wP2W0?)J`2B$dfT#Aq}$;|^{|Ja)g)b~y9JYrre`uh!NzeiRO{ zd~~ha)NK4d&c>)sj}tvJI_b^Sd$;7-z*S*K6Hyr5fMy8_j%|MDqQ;XO=X&Lu5S*mSv@6danqGWYtHMoa27$tpIH`Pd(>j zDMM87NhlvRk9Oo>25a&wuKuKp0YnKk>pa$nVP5IOyW?1m#0ym0`f4A+k$MMYu8teo zKcdalom#FmbgzdnL?fZuqaM4d*1|I>^v*l1QdGi%)|Sa4=b2cJBiZXE9h-DwXVpWW z;w^8vv4(qe$=atA#MVDrJ6~?$a-(ro3-@*eM~FIHf*uzXy75H`z)ZdGzh;NqgIQ-u z(lp&KuP0*_Ee^N$$t74)*6V8O{)WtDiC48g{@uXh^twNYzyYKNTqK_Rqt3c!sQr;KPdz6yBg~jdJY;nG8rp^V?j#l!&u-eW!7X<3RQ9GiNUBiEA`imN?ncciy`3s$o<_0)|81xR;m z0<`Y{DrW#hP7KY>EDlaC_6!bz4{(7#@0p-j`qg-R{xZ249sCzSO^&i|0yuB zu`>S;$oe0UjggU!{eMul|Ddca4FAO#ng4_F@DS1~yV)8O(u>+yI}*|>IT<+qtNIVf zNJuZJ?_m7zI|Bb)V$|wFfAsAgjqPa_ZLIXI|J_a4*ul`=%+}Gy9)^YeTd*P_y@<7; zjggtPDdGPr_#cQ~*v!Pl*xuOM(Aa@ci;$U_klw_^?4J)pZ9-n&?*ZAUSet$KH-=&1 z__sJwGkXWee>DHAwY2_!D=dWcYGy``fBuof!NEev#=`V38ULNRWvi=Kqfnmind+gx`kwhc<$OHm)#QwBO7lq-9}c zC1hmfWFur`VqwuHq?d5iw=^>pur{?c{?uEZXC`E2`oDETHg-0)?+4}ogHSO4 zmrxmP;PSX*O>_}p&|t|sa940w-xpg;dpjg~3m7yC)D?n&e|tMB5C301o4RSv(be~^ zxAX4mlC-^ZozA7^3N<-k@X$Ca|C&^PXz{+@twi6{*igtEQggG53mAJ7dAl0sCP)V6 zx*7&zdHE8J)-}Jc)^Pq@V0-6U`xXNPt}mnmn7H4Cz|;;$+Nb@kS`Z0t1}%R#$jHFJ*aSpQ zlcST<0Wip9uhvm_6Q=eA)}=rdKelb(z~!Vc-o*v~QxzT5g!K2!mDK)1kMxb<1HVWm zrX>?Y^}sQJrUGEcaoV}G+&~;tF;i1gfTg>=y#YmtQ5C8Fot4W3cHoBGNI)fdl_iw4 zvd__Rua!!|1HGs_{XJ6y@OpZN`k)N;4mF@D8YvpU-agAEMwX8@qI!7%eG)61E1(xd z2WIwRK#dDPZ4|f!KqZ51s|rZZ@bfQEq=e%CB&P(F&jEjIkAde54@}&N<>I4q$5X!e zzXD8aKYVZjx(P5l+g`8NcP}ER1YJrW-B06yMKjyaPrFy2c`U&G3ZOlHXU?&{@w0rX z1fXODxC0PWfs#gCR#{yywzE5jIFLrD0ZG%%>np;{JQ(4;R z!-6#T>9U#~{?tE&W>TuNXKe&a!C7zn`_))wZ3AK7)K~{e$=Ra*+4cqmH?b22IG?!w z#VSRugyRtt4*PdsR|c|<{ax%ZY(1STMPpS9NPUG9XlQC`NC!%K2fe%)1O-XSI_L^A|jBrXMpZz?-&da3_$Y@!vGXWNGT>Y zv9j{^T16EBNd5wG4-9A!Ku8B9zlXR70YvIU%ti(ze}-rP0aOSetRYI}gPM?kLIhz> 
zc#l+H=`}z2Z5gRe<5OMht1%7`(DR1zK}yNqBLF)pet`6Sr38p}qm_Sr<7NNgYyaS1 zIE?iGKND5Ln}r(t*UkxqqRLxHUrO^Y0lhHI?^bzF1Ykua_c*@S0z26=%=P~4k&!Z+ zzXiHUQ+l01SiT63QdfI5BWW`tHvu7jmDuZ?el{Y8wlbf^Ks^}bGb`Xc!!RYDhF{9zzy$1bp#k;i=dmcN9O|? z{+=E~z=wSB;~c2v1rV!#q{|8X`K#{R1>s}4@Rr~yW2fe8(bdNZ3zmcBNEMI>b29@e zXUKlMeZbL+g5HfhUhM^oYC7YC_xdBMTg&*96fDbp=irlf6gp}sdkRQSKBq7_pY%jd zowHX;wRYn~P5tC|@iO&*8@2mX_Jv0MsaCf=>m(+pJOCOxc{tf$~ zp5V?dV0)~!C)V+!61U^}&jnl^K+DO0643baJO{xH$(}&@a{KVk^3wTeF*&v?SIOva zc<|@^>DYb;gr4J*xAzUU)%W*T8Z>~C&{ij71GmS9E1s|S2!#Gif)#@!&PUl#cZwe) zCUbE+-TyEdt|jb?8dwjHZU=J1=Yqjs`^Dp}r3c|7nOkpIn>JIxhY8p_4OIKr*MT2w zMek*FSN9HJ1OzfAWW5^r#Nce_Lkt6hBl49~n#>XG{j`c)YvIO&2_EdU`oTHib=~ z6P!aOENSS6zI&}96c^3n>zdHFr!ga6RV3F#wL>?eEL2&s&F;`^8x>{Eowa}uAf z=d9ixN|}tA{;>V7HmtOXyuK3#>QlSax|cDM!9Z7yMmY^U@*@23?_&0VXy*vV(5sr7 zgkYI*&A~9)PE)(cXCz*+1o-Vw&__2} zbedO+05XU!**iuVH(B6BkbW~pf5xmsPVMRv2pvC3`d#k%(2743RF$=rwn%v9J;5%m z3LS50k?Fp?aH@elGI@QCXO34kCl;|+6V>yS_^+`2quj@Gs@>@DyxB6CEv@ZY8m`ky zzvefaQaHq%Qsck>w?+k-%I~o^q_9(iS3J4 zGHb0K_s)^`wi{-fPPN(eFdh_~`tyh&BI+ssWdqA3xk@b}s6gRnId*ndsJ(cm5Ad2=ru?iI@sAWB7UMpO`w+15AvcAY> zBF$Uj7_Cbq*r1JzmgMm_Nry5%))=7aG)2J>+rq&plUz6leL6AfrQd*K2JGC4oLD#2 zB2q&Jy$gw8?QEZrUG~I?S+W|bn`i5<-SnHfw|78#%f6Kww`g!4>_FB5_4?=*mBAbW za8^dD7|g&)0ow&x?ynJ67Q) z00V5aM6~dnhPuF@IwoyjN*`OCI;in5Ysc35bnE7 z)PsD@=h%7;T@h)U6`c{VE@qt;*K%`^+a7&5q?|4Bd$T>vT&bsFK#S-oRF9)p()p-F zyUzI3Cp?D1qsY1e^=gPzfrW&hed{+nC+b5I1sr;3Env)>4|rl0sOBZlz2ptG$66z6 z)qD7IOb7^3nkQ7B3(5F+T3bcsMNjYhDmE8N>fzlLujU&+tMzXuxdrP+D6HC*M+zrX zt{$hO&C%az8BTf*kHV%Spi&P=(Y@@>xe`KRg+hohxV+SkF+XD&P(!xY^+C8D=+MuM z!3%bEm`D^%w()z;Q{g2go7t|_DMd*O!tRvQ+@5FNgrf9nX^tdB+GH(AsSP5UYf z^rQC@5ql<@@hAi@gYRKF)!NGl++c-*`@zx&5IeiQa35d@pRGJo5` zB*aLss@cG?95V;Z59w8uK=W=j=NQe(sJa>6r4rg^vph6d6{^vBoV;rre*Ay5nlvN+N75rMA{>V2ZtVt=98$Sp&y=GWQG&&aRn!4HjYDiZ1(# zR7VRlTC_CS6^FFc3j$C#MKB`X`}<5wJz&q`%v`s?T;;BfVav7h5+bo=;f|ui52L8L zB@th2F%rj16*-;`CL2mdRl+%)s`_y;v(Z(U`5uk}*H{hU3KrEildSrf&9oNQF!iDy zB!qmW?D@^L6_`!HwN@*-*KsnZsQIdDD7%wHV@?`Z>k^^c7qwG*wos^HXq2SUs0W+F 
zhs&ymv+l{h-p1P8ev}WFsh>p()*R;iN^ecrq3XY+s2YzhukKEE#vR^F zf4NmnQ8aj?Arr-J%I--5`?vG11bP(R`w=Wps${w>RAxG4DUo1P?mbDBg3_M1s;}}d zoXP?Bj?JM#-TI1W-D(Q=r@4nLfz^fCdqZYEJ%ob_o+srp#+tZoMk9fO z$@Ew8hWC+w!=I+e4CUw>*mZykHLBzJt#4e4~fwR}cgMJ6$B1A)SU?^Rw53O0vx(Ke<|$^i%MURPs`b^R+T`Y%h8 z#J0*kJo;CrL=HuihZJo$nL%y(t>CzpYsQOFSwb@U8ri()?_ z6_z!}5yWKO@+GJ|W*;)NyAu)Mf1E5)r~LHx@h+Q)ms#b+Xm9Z|+JV#W1C3UF7nrZ> z_rzh1o%MZ`zi=kN@_PP2>Frxabn8uU7<4$Fm}A!U4H{bGT|?%oLhJw0nl>AVbo{tz zD<%v1+M=J%7;2=}g3Lx8by?GzbhP{q)W#((WSkUH?G7h7Fw4`5Xc(2@XhCN}A;oes zBCoBGihr(_vFlIU=1MEJ&07$v_={VlNGPSJoj%V&l=c)YE#! zMm1$0c{B2qtmkUah>45)#&wQyf%dRo5LPHoRZe;)Cco`|%2uo(S4uKz{=Uy(@;Y_H zRctE`&b3CvH*k@1*I7NtF_1ve_NTCqm+Dmjnj2b8yC9Z#0E5R2(Pn}} zW#V4zw4Z;RA|A}P2{!mg$+2QJL@Fcfnjy0fS93DP8YheEMr?IBZtarEabw?sLE>L8 zKiu%YPfRKA8JhsnxYms-307xA9$;Xs;kqmvKY3M~k2Wz5-neaZOQ4(;a^J4B5VYSX3A*)e_g8LH&d4r|KghN^m#fSbyX)CXBwV^e+EAZ#5u7A- z5`B_(6Xa90N6(&%Wu?c`doOx|9iE}miU@=h8BD379_N!%&vVP_u}o!Z?DJJQUT>jm za#+AGgAO`zt{ID?qNEu%iO#C*hQjWxK*B4Gwm*M$6WDY+Ix#2^87@y7dA40EYtesa zcYNNaFD@aQ>6-x&*VuI)!_#cL4PL)^Tg_N|kI#*ud$7Z90cI9QrdSos%h>^Q`f$JI`2zNr(_$S z$+1K>=iWQ8o(K?Hl;G7B=8nKMvTZ|=-#KQhA%FW(6C2sJ`7>?CP=`8v2JJOn>gxXXo$qHqTE8t^(V8ovV_? 
z$AL?X4?*Gv#FXC`Qo6;0K?he0Se+J_#sU6;erOqb3-*$mDgj$Wi0Gu8BkAE}FYc93 z-AFCPjOC=36&(gz(h`-kKTA6e6>xU}$csBTl6g0~mra@(ZGG=8;I*z3`S$9m~nXEIufQd!=@8&p9nc*-N%^G3}K) z{rhFmK?mx8-@a>N&}mA|vJcHxiAa^sgrMif~yD$9h!i-20^v71LU?`*`yGBTo3N93O0(beG3?2HvKY0H&s~~FC`;WTVt_k*!b+> zpq$pYGQB*Z9J;(cGnEB+fMr{uPtpMZ+&d_L0;a`OWfxeq~h-#=4a-y#0Fi}-6}80rtwdk zNh$y%_#RE!#~RP>7yU4|@vs%v;}$YaHFo{TuTW-07{%agr431%(cQC`T1w=%7o}>Y zj}N#XRKZ~PySqV5-!YKENS?)g)*=v@EEri=mHXLRST6ajPpBIB^Ck^M^dC1Y`Waqp{zLg-NrzR6*G9H3JDG@VPX)Ne_$n^&G_mc(@8oEk-J{7p%WSSaM9{7`LtXY(NJmhM%dWRrz+!iJl4jF3} zNfltVy*Q+x=(<9(zGDF`iRLn>LRn!3q-}Pc6SHFc$S}je+iFKsvU3ye;@h#TiL&-f zFH+G;&NRcc8zb;_9eO6>DWy80meMl4d*s0*j$jJH;RE|QRGebH^jn$|#wERXwYD!W zFVEUADbqu*NY{lFNXl|CT?^csTkwn5!O)$W)0IVk-`9YzCwJ7yiSD|-R|>Cfa|=gI z-AfZ0!Obfp>1uhuB)_9|?BR`xZL9WTNREIRyBFpHT67s zmSL}-G>zLFHf&xQ4I!rdiKg)vwk zR6Y@ZCJE;Wr4um$I=?JT$Rrj$v*|uAKkQ+M$zJQ2J){`CV)JL~PC=%CL2Dzip{>D!tvG-hVT>t!X|9<2A_)c8x!TCTC2;(y z2dWQ3+=_9hfl@SYT>vD#y{muD@x}abmcvzTxJcoN>j&vO(a78N^6*Xj&$lniMuOp7oQJPDJs%@`3}@p8 zoU($8DXPDxJBDm^4q&v8xbu)vB zod#IB*JPEt6?9$2#d%{s5hb&?;E|Raahy6{bVnYAt8UPA{MJF{J8%6p5KAv;48ck(PG+Wu5GiRo z!D;qoV~>^8q=z9`omnbPgoJxduX+LsNi(MH3!jhV#QwzmaisuxsXhcHHyNAA&oH3* z*7hrzGw<|4+Pn=0xcWi$GN20`dMj#EV>Ko3JH7og)A6OBZpnm-+r9;D>>AAyNrSeo zY=8J5*=tOdvia=%NI(M?e4nrx&CrMV5L`Nm4`O;B3>YLGePYm~%zKuLmhy^TVC zDs0R+O~)qCb%HN3R_mvy7~;B|)yI;z{0Esx6|HV?P9kJSm9XdgblNZK^)ScCqtYQ% zwugxZwoQ+RRELdys&}F_$Y0Q>wR(r=X<@08*K%cf%0uMvY&}qi<8zsI7uwo=8ku%qRV*8x%l8-xy@Nnnw>)awAyPy^FsyEe_6JU;_N z)f_a6g0eNyXLspU`}V3U)hLOVP$y_B2;U3HnMDt{)Z49kb4>`RsP=C?Tx$y_EiS!9 zjVRMqe&W5{!A8D3FSv;Bh0~>8;kw3u(6jf=74OMrr(VwXIPjc^hBM)AdF~e)OS{>& z{CLuPSz>o}u)-YQ5;=)Ni5dJ&hL%zc24@ouS{Q0LsT;FRxSlj^e7529!&<~;V_NYo zh}9<@SKJ@XEcEMzZh7iG;wFm4SF>mxO$z+aVw>Z$*|H7OtRAnK*H$Q2QO_qW={}Cu zS`M|MJUL10oB1`o*_a((dGkpjsr)(RBktCk%PXgylk$u;omu=iU3RnAxbDtuisOQf zS4BK_v}8*~UEN|6Y6LX+HeHl8E~k85Y$VpDL{e=pBL4Vf7qAV1d`%Rj^DVL8#3iOb zhNpYqDw}X{E$Q2TvE1w1musnfhAu7$Sf~O*I5__p4d$Pm&_M{7P`tdIwNG8^!&0%P 
za8i3lbseU`d;KEXBFY*o1~j|0Y!PfDc)*22ulSmFGNnUCz-y=jfntoZaopgxcbfD* z1U(s5-&WYko(tWhx{UH@Ne1?`>nG_I@7o*Ntj{LUd^-&+qKVsK78Z-@JEMJtBw6)5J7IdBPSKcS+KURo-B3=x-(@d(o%H5dl z(p+2FS@kSpHXgyoxfBXjj>3cg`H2r9u6bH4hQj*E^dr7F{a@H#{lUn}?=y+U=WEPTp{loXA7Z2QTB%~wg-Xm*A>#1lv>C*7<`-e)J z-N{BSrM2q$cz5#a9%TYum6hYANbp`=Q=1jUCL2i*QHNbqF(p#G&u%Ad-BX->C4|VX z>(zzIaCJZ5ao{%>fv`T5fMhC>;uAVtwnp)$^O0voD->CQ);A{+f>WL~OC?l@=9pMX zLIu)YUEZDQi&7&lyX1QHElSO5PKbKAM@h+c0+c$$QC4DJtgb9;qv0^L9c0{)ezXl8 z?fKi0mR`k^f!&LRlw=KcEs3R2r;$|dNdAHa)ssPMSqh!gpR|Dc)F91a|-g zE?ul4=x;kh<|t%u{nN8pkf2e{6%5U8K$q-A^}(kD2`g~Ej#BWZugrJ@%PR>Qi@m!l zD82{H&}qIn0yBe^(a2SL;$t5{@^`9?GT?*CSpl%)z=q&}P#E`i1a&807$B@gq_}nX zXVmamV1E*k%bR3w&nISBbma>EU1aVH#7`f(P_(>|8q+nj(U}@--!HE9P&o}uZpmQc zsVqsTkky^$QWmXujcKq?0d$-EK1Wd7&)f$#Lk}YmC>fAQti3KOAj-Ap(qa{?h}OTE z#VeS(jawDKdr!Y;Wj*_TEGT243}d&lo$N4!=B@%S+juj4+D91eYqV9E1c;@|X=T}h zV7Su?MC*+#HD)sYHRalw`U*)&fC%BezjiY*Vjt^N(3mawaUGc)&0}cUu(~hl<^Bfq?VG_?H~eAEO3%-haoaJ>b7}36p{asJ zGO-|&R1{y4ZU0xPAoZ0YqglFYfm7cpdnoK=Ci}M=45{Hymlx=?k#;=uf)}n43sB-( zJIBr!_(EcWnm+UyMc4Z9L#>&CqygSt>r<$M)nB&)aM-#^8yO$axjJ`@-nvk`p>KS$ zr{70?!$FOngnQpwr}f-mOnQ*jSwm4#;hi`xr}gB}YQ^=17w^)jf%StS6@dtLQp8dM z5BjbmZ(*}qxs9TFWWC_Np&3uZg&Tjbs7LUqy`}PnJTUUAaw$(zUp2o8I7JM&q>MjL zjCk$Alm-1675$Seyb$xVT9c%GyV1L<@I)8h>u`r)2VZf@_rq-Q?(Ia?Or0^KJQs3M zBzYSU=N1WN=lI%OLBX&sI%_TpU2n)jI{d_$DR`|Xuv!x^KZH~#B~Z!h){Lf@R)ZEE zVviHaIgMsj>a`ao@R}xCFA00Tht>$Yd>eVy&@vfv1TAZy#e4LCuGeYn0@TyZHn_Wz zlts+W;BCEFP0KG7$+%&P2(+8`R>--o5GAE!^?OA1+YB@PG$6bTk_{oaKhMI8&%3%R z@#7+**ThjU#j#gNzn5tBF1Oqas8yohE%|Iwr;rPw(SDUZLspcLE~kyubcRV!wStka zHv;{memLmp1_5p3o>sHY>nYs4p;=C#`lHy)*-zu?F+*R-tR9k|^tc~Kd8tTrH$+r= z9pZz@WF#7hqZ;K&l1X-nihhRL!jM{|F4|rT>*+e5RcU(OhNJ%Y=ohWS!T|XsqP1Y` zYgf|!X;bF(zJ8pBdV&v~?8gIop2RUz}l8?2u)Zzx{ft@Q&$`%`K{y&MOJxmJ^FmJznBr6V4 za*|MF(tHS{jb!mh8C1X<19_D!znKC)H-Wa;q)`*6QP=eme!jp)3_c9Pmu&Q_Qk_ib z+)UZ@3@MIBFg3~c6IrStQb6vYE#nLp$7gFTyz9J26QS)mzDZjlK=+tUZ%98=`H`H* z6!~611w1Fm*^#@p;SA&pG5F{U+k9G|`~}vO1yDjX?6MOj1zkx^N~42THi7QJW-*}E 
zvO6Efg)I%8WhJ!dd>;R0h>Z>k0mZo1_~J;W{f-Q;C*>h_L$hvq(qVObvRQhsm)oro z6q1Y^9Um`dyN@yjRbre3r7a)DkxJ}WAuZ}5l`kV?F^oU6*uQcLx1!oLZB_?wzMj^jBqz$=VYcLY&_(V9NM(jtC;-^Q-v(p=+@G1{Kp z^Yl$r={(cdge-#FSR-K}cv~uT z`(g=W=bBOfMPiUHs_p}`w-%=43qUmf;?U%yL6^6&aw+J!2YcioI(&&s^nG^o8UuWWp~>*?{lPSk#4D+(!5NoOhVF8 z$V!lio1dMq>$jIJL$|a*UVI4~k8hR0wQ&j*=V>gCWt0X~-^1|{D~9PkVosim z5Q)L4w1DkA9^=C(M-*&D%_Q__-3l^$f}6T5H+|QlNj1Ddv=hsESHX<~WF3aEt4HJj z3YsJU?_!JK*Py1@LI-LzR2HVN59o>u+=+G5@3gkorrX30)_?&(w(lERy89FQOLDG* zjt_F=8&EG?QvUAlgM%ourXMgC2zeyG3^F2hwk?GFBv> zP)8FfH)B6#CFU0@9z9DJ@o=FptX%u$Ml-PUN||^j_A$L*H%rM%; z9sqlKQ!FLIyf$q|Hcls|Lae@fw*^o->|xYf4~>?7IEAq}s_sjbISk?&;XYg@>R2|< zd_m3FS+G-w_0`6p-brjlh-sDXEN`dPTiB}nK8jvs)#_7}4FyG`wf{3&v`4Ij5<=Fy zt4KplpnOu8KIpr;Y=le&T@tz8u*_2_5`lNV7%97fj_-lac4X;WsD?TIfD&TBHh|eD z;dG@KYRJ;nr8_{v31IPl{@ave^qF*C&DbeE!tN5ZihNN>PVG8eH0Sy#DyAlj-L7rV zp5JV3X)shu0lsM3_v8{+92@w>PzIMb;{~-851|JT{?4=47BeO3MVz9R zy|Cq5l5dpc#7x@f8UP>WpL44vaL$BOm=;LAzDO{yC<;&6vHIwL_t)x0tPX1oykt_a zYPmkm58R|Rs32W?3OG+Hm@#ngz6#QKt;H z80HEKgp4A43=T!;5R@GPujZ#PxtQt#-Wc`Fg+W`ELCZEFG00PNE`;)p>2Qma|h5Wvn<+L*O|j( zsM4PiXOaa8UpL-|a^y8NKVxN3PRYc2$vY{ z8{eFLd+4?V2Z!ecVBl$?siQr4Eje}4OqcNR>e3Cj^juiJfXGyBQUmLQ)#m|AJUd@Q z_t~p(dY6=N_d1O4z1v)gihB-?&tjIsa)eV3`A1G?^L^hmCr@2ey6&TUj3@Mj2VK9LZTk~etvo{0 zk5u!&Hrt4$xa}A_2=OZB7NU??0mMD>Z7*Z$+6XNZ-W=f}V_IQdA0Tz2_K$?BA_r)F zdpiQZFF+m#JeIOZcVyLpo0gp1JCrljC9izfuS*mx4GCFGE6A735$=risWLpB#T%W3 z8L0M=_yop)(yz(y=Zqbh-N1+mDl2BsmK3F@k_ft>X6UOnOaY?_o=ay!-ySdKOuvMm z+Fx-#^CZ()&5Uhd)88v#BtKekDy?YWmWI?qKvb{6zNkI?#~GgNB(KJe5E%S9dk{nRiDj0ge$EYLPEj zP-&|*lcvbm#6~?cB40&G&x8|)n~hE;YGGUbb*MJli1`tZoPub*L&TA~E%d~{_zZ}@ zt83JG=`h(k9^k`ae6EkkW6h{#_oC_=o_{OBOBk!y=z-L+vDkeAlnkk3s>L+>YVx4+ z^PtTK@1!+}MFpl%){W;eMxCzf;!eC^R$3F&Qy{yBKmVHIC_4iJ@uyi^^qxkrysUaz zU7o@nJ(}mpuRmG`vJH~loIA>w|A0vBW6O^Qm~PHDID#~rnUiHY?a=rGMmZWH{0mk# zw-CBtTR2ND67Kf&5Y2I2{Os5E-Q5!79UfZb_Fd!Z?(eJQ``QNrVHe@z%X`89bmPx( zgZzetBTz@)%5p_BBCH}o9SyQ$9V}3-3?PW*Ic_cg`DDt4WQ#c;X%w>Qv~2bZrgx_iIfF 
z_*CQ2`Rls`?%ow)sWARLL+Ps8nInz{sWHzoYln$9cP5EcMvEy5 zcBO2d$uWy=%F9i9>!MXBkNDW-R##mxZa3-W{SD4I$9UF5P2|y)TsjL45haIvx%Z5= zJ=5($2B;Ha*OMNuG=iAlqt|nuJ;nsQpXo+A9`QHDEOce$(36~FxFwp}wGp!|zoQJ@ z*tCdeI|$d+pXr)v65|}e1>hza*};@qO#vNzab&aiw$lFX2cW%+qhr}0+y>42p%-nb^a zv3s7V^xE|LIN3RegYj|z_0X{G(O7vAe;eSYq*?qtvX`%oU|-5?YDnZIaevC@D_Cth|Hp6(1b=^fD!wp=8k)9s=LiyXUNVeqo`KP?JrVpo_ z5WPo8kqqr>EuU*;5u#entK{GnEr(Ib*I9JN=&}W+m4`>jc(w5YfC{!q6WB6kN)yR* z>SKe~bCC4-WV<2E{-LoV=>fszUZU~T9r9EtnxC}4TcsssP2Ghbk=mqvPY5d6+!z<3 z;_mK$C$kpbrFP29e1FudYjd{yvA7g13#u{!H7P96nnP6?R{X^rJIc6e-XwB@!LvvVO4IxrWiu{R(l#^kd((kl>^H}K zcZUdSQ>dc4vBn!6o!Yb*?4D8ZRki*VZ8W@;{k?`h&Ozc!nURNWn3IPl!sTv!EF_B7 z?|74B9T3I1MEoB8`}5vIt6LunAz9Wk7k-;Qck%76>*@PU?yrIy;qCBKL-qi#LjitY z<}h^8jJcZR=Jyx zy-x^OM~xV|^q!pu7P!r&vjxBC$>1s39g6!AcI#URnCHyCrDRbUDAmc}HbcH2IWrN78+5n1C%F!jYNYsVZ%B4gIA@L7vx zp7AX6Thd)cLl7#fQ1H^1xlsl$o8n>o(=+RhD?8jyVdb+tn&qmV`3gPWDs_=dqD((2eZ0?iMDk>lF5Ha9)CuU{F864&fgzaz_T<8R-S5ojWTmCQ)oP}ZYq5|4v@S#)^4z& znd0Q5zIDq;+**RUk3eqR5;&QjAN2g7c}P@x-o4)1CPqjM_&Q%2=pQc}CGVf*-@a-` z$e*%@ajguBQ+yL{IOcg}V8;KGuGkSZQNl%BX(i5`TlBMye`-U8#0o^r^P>{B|GKcD zm@+K7cR<2fvrwGZ;001zoX)a+TkC1GbTjZabKn?}WBb}m@oqMrX7hrke}R+nyYx>e ziyk42T$aKihIS69N%_M(yLo-9*Cro4f%mRek3n!^0sDHk#&DtlF`h8Jwczx7`Fd92 z^di$NO4+_Ox-y^8bhX~3O`WG9U^?wIiZP26ZU;@H<~$MICnb_-p2|W=%sz_8_4ErW z29gsBt55uqRzfS6gEDdE_@J27*Cho=Id4KuW#kG&%PLNG9prbI^#_FKVTdG;575sC zKudmKc68jwtW&3dG}zl+YM^7)JtWFrDT_mX*gXFf)a)a;Oy4WXU?=3Ex5y~;mD4IjiLP-pvArtWc1ma%X1$L#TVWVFu6ZXeu)P;mPPCL6Jmju}H!KVw@* zXee3uJ#x2?5`L*)&I=SUQd#q=O0iGO&6QWgH>H^l8@CH&&=l(^~{W2lXEi%H>n8z;GUr z`gix;>6M6hjuX3ItLdR#7>mQd`rfl5(rp#RW6ON;Q40MSI?yOXTo0b0aHQ(}nVX7> z5lvl?2!#H`DpE@(zLM}NWMUK)bRf#7i5dHP-`)Hp*N>hxHz{U0PP2Wu6QB$z~K|;@w3baeeZ*D(Y+=5Z|c(*k#Ai7gJWPnzfW8uR`Un9_XtEg|B$u~F zS}*S4>Ins*TjRJ_@8lyg8m63d>nK$!R zsNw|S5<3ZOp$6Q^fBi&i`j{H%>|nZ9f0Cbb;JPvIf~xux69U98gy!$iqvgy_S>cN_l*p1ETwt2wW+6)}*=aN05M|v|6lgr&oszQwIuAX@D0mqO0JIlagmU943 z9(1jd#^~wy_gzXN6=N6GRY3NG8WnC&-}1=^zmGRK&BPU@IGC(-CXBTe%Zw5Yb;9a* 
z*s2*1*kua^b#Nmm9j?3xHmxl3C)MF291?M>F%xe$p90C`=+!#%(b=gn9~{aP*_@Z_ z)x4Fy^ug!F$@J9&gI2(|G%@X+r#)uz0B54sP2cc&kG{T7k(fe@h!#Vwzb&46JiYTR z2C($pIp`HX)A|#=<%og^8=|owBt7zMym0Jyn%C=L`g;~# zw4A=JUd*ko=(7(6Az2VA@OVD5ZLorbL2C)SJ8N~WV+`R1 zX!v+y*89KyQbWzP5$r#1N2Tu4j$0dDJ&TSgO%F4M*JW3UMABCaQEs`D<}2rdLd%&R zn*%+}q6TXW(x9$o|I3h7;hT9L~G zH^*%Q^ET~uq>so3B{4-?Uncn;Z7Mh=41?;$s{f=*2YQ_Vd%(!^ATLC%RBV2%w77CUmw7}TI(1I7z=sR zf{HOOp!y6)(MKk7!lu4j0A_6n8k)%Tm+@^n{#|3Je#bP94ulR8mC zRpCC!ev^g+gtC%Fzqrp1w zA?Ul~YIyWhvFl}cTf9IZ1phJAV4U8G1-e-N^V}00<@-TAo;%_Dok^coEH)yMeGYr5 zAjjZNhs|mc>5rkvFKg3uav~Cuc%Qp0P&A`isX^%V18*Y8m4G&c#{pw7<_zg@qf#gD zVs}YJYFYy0QTTuw0}IX8MBhP#YQ-eN;Qwd`riW6Ko+S<&s;$&7lTWdw9q+TkVsi?- zh_um&hJ5dt9!Thf-=ZOx#qBk9#|8mHC5H1V0+uAs#LxM>@L{mklIkh69LJ1@CcI16 zHT>Qxe0?o0BxiUM?x743mY7Gc{Ya=mOnWnhx~UjICH49IS{f%q4Uy^lyHHqVK3~Fy z$CkvAOZ~(dIZ>&|-11MDAL{P%&ghg`S=&@k@w`$AqOn3L&1Npy^p-60qO;>yA}N=I zWL*?gfpud225u2ny*nkO2NqB*(-#Z4Hd_yFl=vTki8 zPfy2m$af@;3pR8Oe2830B8bDKdLs5jKI$!PprgTHCmd29_L}{4Vj@)jsTP7vW)?$) zv_C6TEQqOxeHlbsdzbnlYe5o*ITv~;dW7JlaJ{oSC{@l_sk}p%M>ewgJBBUO5J5L5 zQZl`aFklb==3tAg_qj`?1NMhi_b^EWtAE76sYl-Uod~~IZ%BDHWCjA< zT{ZmU^Nk)g0ud8gY!?jE8}{K(Kf;a8q@Q2Q7$%71%!|2^*xFs?1{YU_8H3c;Xkrd) z@5JYvvi68Rt}oho%feQg6nMO)Iz4c#U+n9X6fR<40`115$V_3v*?T>Gc&5FG&EfJ$ zuQ)5i9a1PnH8@Ms{0vPGuhwj#A9Ay(Jd?zRa4N)@^9UjS${h~N9lm_#g`vWQc2SN2D=e9iZU?~ip|$2WY3U4L>e?l7 z4x|%sAb$+| zi7A43L#amMq@51fhjnFDg-bVE$XCdVrMQX~>}+{z`?#X-HnZMulU|%MfPz@VPEprT zSa_PwWE>I#?L73vR$iDI>=`}3ZUV|2ZM%V+y;(Oe5A#XkYF1Qwyfc4CCB6uNX-*7_ zUU3FZX#Ez<*7`!U*%@Lk6r3QSA9yViDR$0X$sePHbF67)et-*aI+65lhSbO&5BwGU z6%SKl|oqUO^zx}`T=%*uQQ9(r8D`IrTRY3Pk($LhS)roBugq(95 zu&mNHrOK58Br9%f&V}g95uRiS=*TOACap2&sDbyNVifJD?%#Ax+3DF9U#r$ub}o5a zB6Obsl*D)kN63t9?;ZP&`rd^-wwp-scvW?Q{TxiHQ*8Dy^oD8+qOjRq;I(@&iJ{jr zzCa$De~U)o19%eRfg*HUb_edzomu%>Mr&FD)BG0{_92B;4(?oN#d~G{WnI;qX6?~j zYun1BNpH`jwECV4DzE;3Na*}gY6=vzEavrTCc2=^_KJQ(PQ=DDd(kG5dXZ-}=Lyc; z@+2AG+(t8&)WaIG#5F!)nes96h>jZa#L|sopqPuj(J6d6E&@v6`vkIE0p79Ac zTEBXvM%m+o2g%K+Y4@4U=#08Z06xDXUQEpf9h}aw8ca;9k<;~C 
zP?&N9t13pjy~XVGXGYzx1G2Chf*w~4moM{k>0}8P5G(W_jX@$E7&+2yy2(Fi%!*V0 zK+*hv^PZSELvBcx0+*@(1nf)tNCV|O@(mS6o8~EvE7n;F6Y9FuXlM=kCR1IM;^1S| zgDdZY069l6(l93B?DZ&W%{rp}C;*hiGC2cTHn)nrvLw<;P|h3oC(o{xrchAfDfY&BupVvTBzl4D|iV6LYjDQe*u9xR< zbn6ScQLBNcNYHw@Tu6UA%XSJ>s04+@crT0Jrqmf;-P?ejLgCi$^h%z=c`w_ps_bb8 z-c*EifwOT7AI?1O^eyhyNDqAq7>18A2n0YZ6hMQ9{;>q}q`Cm!$epQZ$3LnJ5YP{- zn8jvpLiC>i*^OP^m*)0~4~Ia<4?mV@hT)#oq50D6SRBlD$%km}eY-k9&3qBWNIU11 zfbkX*TxpTtjiAC;CIL=^IB!|p$hfBncu9e+)#Q&?tf-BNr@!nS8fH8ZX}%M+5WH{P zEDMmi`$+?)EFcYTZBK7m68ee%+^{e}oENo=3B`vs;jNAgYb2Qi=*{XFOossfNwg?= z0r|2%Y_s?h_2Y%CW*`vypNX>zx^OCGvRd!t3>&{pnpf5p!9V$%AL~nkdE=0sd~B{op|nm%GHP`+S9HLv8`u@)8pf)OICa8ynvvPq zW7xh9<`9Es(zCBfaoeb#2*SRNxR7u0`j}*-&MDf;!kBXF3oL7p8K3lrALQ4I4Azf1 zkdHlGt7?WXXN!!$@PT%Su#lULflayGOm4L&ENthzY{qdsc5br4_V&UGXdCM+fEInl z^Uas|6(a#vPn_T`eh6Ro9g)h~?C}NDH=F#IX`P~DoNM0To&MEOQZv*Z z7Of9GUG{hPug7&*LY@R#yt8SmUx^If@hN-HUYH0Yo??NT<+#X4;&4p)F66wn?U|IY z@H-$aruwgpQd)bY)F}G0(pjo^N5Yqp|v{2ux&g49gGW`3-L}A)t_J_+(2s7 z6W1P31FrB2s1CO?Bnc^$Q!s5Bqp*}P&drzZy5d)2?-u;pY8LMkP<)H58wRvmHFCq&?M%|D4gin0n7XBi^h)3yFR+vm}Ido#7C>qjvH6C ze~~noJYu?bt<}pv1@ai7+Izs}&0cr~z7A?{8aYD^Q3wy~cRAVgGQ-^?c^a{K8YkfP#obB-KBNyk+1VZQ5brakZ1F338(APMVE2$ zhNVt)Ub@y->whk*-2NdK)`Z$vQ0^pS4=dPiV&!ij^E+Pu6X=ypj$(<*4jh3nkdakN_e{)Ri$>II%`#3xa$x8S%E~&96PacNBLs}PPGdFA zVsz$vUrmeku&9`&kxt2O*>3jC^`G*qiLvyOumwve20QH-v|nThbRrE@VbFWY+=b z_D9mn!vNQkSB4yDxsvu-)zBgTJ9 z8|#e)c!>fGCVUq|C4!Ri2Z(^bD&0KG1@5Eg9EAw=hB!J zc|BbOL$>83gbKNEfLKIfy0qcw-!2(6b{{bl#|k0}u#3pn`GVnKote25acf}Ua>kHt zN(cB~hKzPv*r7L(f*$GZF0+UuU=5!cYTynkM&3VSU4T$J`?U)ehP&J6Nfd!m#tY8H zeJmUG;bmjj0KN3_2CyHp4-J{PDoJ({y5e@kOh%1;JCSjIr-g5-ZP15edxPZfkUx+WsSC@gk8)314y7+wsAGmMne z-I?6HsWyCELTD^)G2Qk>kS+n(<@prh4t%jd^5Df1`jJM6SXGPKF@IQ!DUgaKv54nf>irWNIAl z;Bsl?o19(okD44?C7to;K@Ao^;n}he!9_-q?`54#@Rj2=TZ_Zww!VcOw^_osR4|H@&*#<=$$T5TwZ z7msy2Po}T?VEys}c+ssN@xqzWntnYAB9O7oezDUuYsfz!J9zR&iL1vUtjiqC&mL9F zOytucUzK()nAAX?f@W!ECkecSt>)BDQhC-g1sVOG_8%^iC2CY43LG zGm4Z`uc%i}9sW1>-*b<9SWi@i{BCsTbV@ynr!# 
zp{gBAv$^47r#D82C84)hYf5HLp|=w36pmTuZ~kWWG#NcC)vtxI=XyMtw`liHMjJ}p zJ7o}iLuJ%Y>sEvpCQJK`>L|4ofW7kORG*Q6)4)#hef($JZEc`d@WxYZh*TJ)T!KV~pu;14iB_6KPZC5J zqGD~0e#OE+!p`4uoF#YM^3mdS)q$+tzekTHK$89|Z2PaLVrZP6e?O%1A)9EDD54<>~1u(%9bR-$h^B0^@&fagi{}Z7o#W!)tb1lh1L?(+KzxX7plN3?M01Fr zSu1>WaW5k7)6diET}{*B3gP;JB#|%~4V^quHxq#zvJ#Y#Lo*zMsqPE1_%13cRJaBN zFcWHC_X}#l+)c}<5by-p8D8;qJPwvQFX=I-|1h8m%4%xh+eq+Kg`K|4h2@94nlYU)azKqF6M>QSrb z4$TZbpgz zKSauR@li$;w)L3D45=tymdg*piJ7z#^P@=)BCK{)QM)iaE1-bj?jX%82duuBwJnI%&1LKGnRt!uoI4 zWP%MgWJajA`jd!W0pRMjB)odFDII@4C~lnW#7({>D|$kaJdX>gg=DaYR(Fxwu53c) zjD}^7JT?8)$2Mw5%3>QXuw6U;gI{qWJO&Cp)1%d;&3bmCwhI`GfiGFEYzkUWzenKi zcn#k22>5$i%t|@=4`|LS?bv%V=z&Sm*F~5hYNr5zOQ0C5x_OVlj1NYke9}3z6~AA= z?8qM79u1%3l7!fPstjCg#489lEER``on~Jn%%SXtt?wBX=SsDS6lJgGJEP>Mfq9!O zkwHPdGcH>Ut?5f8QSp6#R|jl}i)~kG3lK`crEIWrcw4If7AP*8Trswbn~0CK#8&6U zSum3Y6V%l82qKD@Ztig!T?)xIZ+%js2agIX$!4$Y6MD~ZT&z6Tdh4_} z)Wy;uaV#vCh6ZvmUg&@G0!^WBPc%TqyE0AsmIZ!;F9HkQ-)UqE0#Gj|0I`c=k~n1` zzvuznKcL9z>H2v9+OafXSW&S9wm?d_zqdEKlC0YPVn<4@b4jw=_fA5B)m@Kd#sf@dMQGK z^6jHQQ6;KLlq`6D$O5ky=P+Jp2^2%QN~y<|?;_zCHfW5M_K)C0wvNNUxy&ZaiKp&- z3B)!p6K^=%4QLs_HyYtC3Ny*5ulp;kmXH{RcOB}j!40B~c(3;@0SneXQYG0(L($|< zj!Zpbpi<5ud&bFI!zW+mE7y&UGTtZpo?Fa=x36cxUrktzILJ7~b7?4sk} zCgg|&xKiRtpuve4pz4q7p~0c}1~U!kAlW+6fbdBx9M~;Ec51lSP~vZ^W-jmQ05IY5 zljLyGoF~b-xqP$8LasH^E&NLOZWqK;qsOr|FQK;Q8-v->R@l!1F(z+|>vMmo2T&7B z3Ffev=iY@(8ZJluFGIL*9P=os@hzUwzrA5ZHKU%i-VT8iHU4hx0-iPVZ;z`*Pu-nMPEb-HtEk(%2N07}<5k;UB-W%7{*9pa_ycaFCMsoRngb(V> zPIFSQtJZwZodyr)vSr!kLc@}oDFA|4>b;-X)YN(})Lma%OH*mFt}{({7UamjW^Rv} zSeRU#&8gbk$k$>_f2kXlATrj$iGUp}1%Mb8@oz4{W0d)IZa?f)afWyS@%#yOvM2t> zW=+9w2yI#HB~3^ttI5g2KwDI&oBJw!>bwMDBBjP?@Pz(_Oc%$BFG2A4oYqG?lCH&N z5IQC$3Yo26C96D2s=Z*Hz?8F&UPEU#>z7XC5roLz7FuXOqLv|$Q`ErqhE5$PbA8jD zZ8_b0S||4`LekK9+f8vcxaL}#8s-%@u(mnK#nN$c#h3V@ruU*eHepA>PwA2sT=NFK zffj6qV@4sfi=05w3$zxG{;B=6tPiOQx#!u;tB;HppgHRJ`exEHCs z@zZ}jWOZ{e3U9F_mYsiZ zO;%F$i(UKcv!Roy10Y<&~K?`N@(M0BGFQK%xoog`23dQ*rgqY%Sf 
z;UB^V#5uA4iqohqT=Bl>{?u{UsI2iF$#P8}BKze{^PN1cwB}G3dpJZ3S8`?jm){<` z=k(Wcm)>St==cE?)sJGhv|ncdvuS%^LGZ)rJ@rmCG$q$;ovoYFUc)PL$)AsbVy5)m zW>7B6VV)RA`$S9fx+=O8Pz!hGm^-vr0x~~vlOKiXz|IrI9{s?hx}8+X;tHTE4DMV2 z|Hi^Uk(MU!k>zqR_u@2rLZ{%P*TBWj1Nu8w52W1r6mxwXZl;WNT&}J+-Vvub{}M}i z#R{M}jhXxU&+NrSn3xbqeJvu|EEANZNsa3jtT868ps}7&6{5s)ITU`CiII8rVH|8J1fbUt z0H6e=h%|u5ajX%gjbv@<5}10$LLknw>f70M#o%RLy*xHMV!^k2^|~1;w4dDfdHp)_8|_mTIm@V=nqsqH!niH?o&#h({UvP5WC&J3Y0C6?C&t(YL7aYmUdo(4s zXo&fj1~L8W45S@{1b=kh++FEimwg8Wy3^EzkK_2uUh6Jm(AV_`7}#Mk-ie~9)~TJ9 zAK~kO`}c5b*~=`JH?f}$r+u~&!bCi|hwF~zD+9j8BZpQ?3V}l?P_xysbGHzeK(#;| zrKGe~`E8$NvU5h#penjUzgt4QZZx%gizcFdNm_DIR)?fspu1uw%~?4-SJ9I&+c(jb1H zK6_Z8W@?woCo&Uw8i7PoVj%T=hr9U0xO4X+BAUik%Ra_&x30aVtYS4f_GkVzFAKA6 z;7qCAoH=SsB*i2adxz3Y-g&E_eX~bS`NJ!|cI|?8Xf&2%nMM{1JY0i z>0=uGAEf;$$_x$Yg7mjgWU0muF?bCKKry0|V;VlfkRL5=>alsL?!#7y90DZREW^VI zNb-!nJ$?efqeuKU?GivsKSknl@0`CZ*Hba#ZS#}JF8bJ5%G|gJcTBEo=J>2Yw98=F zxgQaTkqB1?Po>u%h1D5*-b0$So)ico5{?}YWl52nBK2?#%&=-fpO0_|+e0|6J#8)Q zpo@zbVr-Mlga%o61F^EE=gzNJ2x@Y!!6}=O&&itZBn=<1$CNGvbcF#Tj%!#8yrLY2 z5`7oVLHemoM9ZhTlTZx#(huNP)^b15W7yI=eJ-cA+j5_SaqZ|RL)#7T7(vI#X@x%* z(cWe~mq=9G9+1LkMPc;5r^t_H6I?hP+i6(J&Elg#TZ0!5Q{>p#*BM+c zoiz-}OQ>*Z@WK(|dqV71a1vfVE|D35KH10z(2XRw!H~L@&m^$!h%tzd2saZdbpwG; zv-md)o|nFc7#$7U2Dn&_?!xkm`5kt4Xve9AAUxh*G$0~;Z_dKQJN>UU5RO?sAPUk% zqP!MeiGu6#qQ2bVUrCST&@b@VhEpu? 
zM$2v@5zA0j6zz08(lT)6hm2lnxS5v9GJG+&O`0=q-Ytj7sRX(Qs4hjap{+Ik@t*mq zUCp6Y(N?ucS%!Z z=e%B6$9g(lr-XH&fGx$oSPT#Ps<*i6mOIF{C@>``#Ou>*hsO5c?H4(9o<)s z2RO(}v0vDpVu|Wc%iVKq93kGMV_fbpLwff+SVzbXEywvb-mpN_cS7BG?By$oG`X6w zs0ug}h9{-fJILs1uf$JLU>_G(Q-jMc+G>X^h9(so%{xPYNtPxE@GV{_=NoB_ijI`> z4}@07JJT!3Nk$5OB7wM19AU+1&Mp>KZ#gZ(n!d2HjP+(M#T16VmBxV3^X z>~t=5#~7C%GitaR`8h5RD}2A`4uhK`Y*^vxU+bhmTS>!ISAxrYKU5YdT-Xba!2W$G zyBcDUhObP+Iwv|ca=@70&#Q2yU23JZyZ{h7BsbQgfd`_o9YKzExs%#iRi6^)gbs|# zGFhOV0Q2z2vAx;7!I?mzQG+sTDu3q~r{wz|5elQi1yEhy-JEK<7C$*%C+swAE0mR{ z7m{_gw_|)%3Se`~H|JITg)EooP}(v>z4)6yN#e>;0j0GHJ<7YYuAevS zRTLl7ZnWa@_wwpZj2H-`p)T_MJ-==Bj440iGOGb^*pEW+1Yv>yxp2dylcGZ-0>8(5 zt>nn{GOft4>;rFbo=C*f9PTT3qk-xh@R?ur1_BKs*#7uR%kYOp3xw=E zGcYZY$fu~ErCSQd3pf;HDyb8w|7Y_iCpkxE{6Xqo^$!AM*HuggZa?_QXuK~7cW#$4 zaa8_$ZQ4)m#75?dM2epkB6X z_A26&&{)#$?iUm_rV4`?%Q~F{GVge^aNF6oIG_Vlt@UDHUb|>T2vo;;FOfXp{S?Cc zPGACjrtR)RyFu?GO@}!o^71Q@)7Bw zOyLOhk+yzu14p53WNM~!$_2OzqwjCd^04f(UC{%CjWVFa5U=qim5ITgBqw)Z?hp+s z7?!krZZu4|o~pLQ!QY#CxY7NBtf2ZIXGmxs$p2Qw_4ur5%mE?DG#=K2uSyV$xqxFy zoT;!7Ip?1Jw2gT;hKMc{$q8o#CUUe9`G9a3*Cj#*B7MOLu9tPpYWH?g zmMoIL$%iL`7*>xejS+Hu$09;`CaK;Ehjdl-n$&M%D;Du=?0#6rN{%OU#%W8}apf5o z5#f`6G1FJi$tz|SNGs>&cM3D>>LFJ7Dn!gov4j_ngiNnAy%;P4xTDtaH9p_6|&&^d@upAmK5YVH`P@hJoP5IJ$%eBne}CkjMLa}Qows>wYq|)W zHf(p9W&0Rz8uxU=!3UbV?*n;c5^ckEk*pnf$vG3>0UD;X?oljegwGGb6&0~$@2G{W{6zGt5$iY0Na~U-=vGY>#Y`$QvNlmn2-&H z)~&!9k0ND?=Ji$1YZw~6dIiQNBBJTsJci2oV&bEuJO*8IJzo9kb* z|B}V3qa$;rQ2|oPZM{%>)Hof~2x&s<>0HuFGbU@Z^Wwfj|ByoG@`Zrj8uE^)pTNP! 
zp#{6x(L*4vbnEFM0+OqlWmh>2`8RYF(N613TIm8|3+q(Y-Gl8&N>)c<3WoG2Z;3U-))G`JBz0JOi1Z+y-OX2r*VTqv zLXF~bS3%y)q?Zn3He@{^R>0j$u>5Vfpc;g!{`5)*65h(WnI!KGGs%)VEF5)R(wOQi zNwYO7;XD;8QFl%ISvyd>c&Sa+!kC8P@uf^rKA1kj**}uVN_+1jW537c)ERP2^aWO; zQGRTL7M(vZY&DG=dkB_a9q%uI%UxzA5zW#PT2O$4O3k`(6#t4u;3OAJrHfI~{L>H* z^NzZv;-bZP?M>|u@U)1~(Vj4z5Xpib#TN(;>tQaHuUTNoq3`i_5yPJT`+gA|;Rf4q z82bJdHA~%RH^SDmHx^ca`jdCZgYH)e(R@qMOQUrM!L-FT7JFfm10Z;I*94ycSLK`O zR$zu&6W}jRaYWzL_aM@<87%oLFlwJG?if+I^KUO)>APFp_iHpBMq(QHxy_;i!i?3i z9o>VK?bRDK{ANMMYN}2qh6pSU$}iI`reTL#S?{?cDTeL>C~MV(Y%0HM5J}yc4ekcV zv4o`JAnOKsfqn1j9-q*~8xdD)u;M!6@F5K?0J})_7WUIhRR(;+)j_jq-K!iQ zjxIL8kBFH}DKyk$kA=bINzQ0cy%HWz@M3tRcPsM^PMhDEHcJ~w8C0yO*O8Cdc{`9g zGg+OW5Ep&)-LDH2#t7twB@$7dk2@z-ou$G2;SzZ;Eg-6Y4lDU#qj*6hBz1I;O4Aza zG2LRi#yCW|kRr$$DZW;=uUiSO!fF`YL^7HYmy0`21F*5$U5kiL=Xe6j3e+TYcI^Ge z+mKfU-O)~;f+8yVD?6bLCzFWA+@Pm>>ZQT6;t``9)``Bbb>jL;6w2q=JejFjXwliR z^z_({DYH6uxyi{XK}9oCO;?$2cW|CuR7c*pbPM`NsaP)atErHqZ8IKbFOW*iz&ITs zd+tlLp4Sifj%^k=eoN#MAKEhC2zF{r7$zugyb=eBPB8^2h?*%2uUiEo6LkNb6M85# zppC+^2R@>wW7q`hl7VwJ-QE-U)`(6r;zmeME|~KQ_HaH-^dQn6na!}}ZJEaI^Su(< zdO=6Je5i#eS(f&*(z#+T=)#w+>Jmcs7U z00vmx8kys4S8gl06(b_jYt|(})e0jZ4GDc}^HKvvf9VIY9ebWqzD((9SpObck+M0C zP41V+<&E@DBATJ5Hm>!EyFls60e6IXeb#nE^Q6-1r=S=e{QlSZK0na)wffnK3N|B% zyXf_8r<#L;SCqG6fIj}Pk122o`oW{jP}Af^Z%Y|`d93r9BUF=|*`RI-g~f?5%2wf> zuJ_IAY&yz6*|$-OzDPv8*)Su<#v(4&1PU4c^acK`>z^+IV59Fr=0R#uQrM zS!)8S)Z?WaqtC0yuHN82G0}Fj0iTh!u)IC#PKb=H9R$k9CqPlPp-WtY? 
z!E{-)hRaL+f}+k3wo$DZYA}Zqyj2`O2!oMynAZ<>0n9>{3Zsx|DYEEha@2=00u3p8 zwl6{`)sy*MwHUgj3CNv@00C7CD*1QaP*w8Re#*^>U!$RcZLbg;><`^_@U>s7WVw1h zWq-&P|3I^XZ=ceaohSVayg#XX3(FOM>uW}BnjB57H?z11Ndd$Rf8C8y_+G3YXRp0Gg_e+vK5nY zs7{bLVs;Ksg$ciFjkF6Q(JRRk{RO0lO|YtdnxnTB4xkxVWcyaQ3?H$Z+^&T3;A8=Q zzWVp|?mqlg=>ho)FJsX#2*8_MYdQ6U7f@CAm6s#xOUaSk(}k3eyJ!?b_+2cu>svpcYUu+J%hnb)*$~9hclbS%=4Vgn^P~JdW#V4)_-)ygG1TSz=1` zRyI;OmmYTxp(GelIDC09_Yar34(^}P-Ohz4?&NDYuF3lplHUh&Li`Mfxc|JQe*{gg z|5s7dUE5e_RQVv0v||ui_38bs&KR*lni7JlC&zg}yq`GtcP=ih4A3aUtc#p{n2u9* zz-VkYbxdvs*mKK?-`S)QnJ{G7ujgsiB-)DU*JTu>ensHoj~A6WkI0Oi&0_Yiy0jul zOIF>rUNh7f{)634kRq)b5}6@yc-N)5_2gFgKc80cO__WA`UgE9_RwFfH}Cy^%Y(S+ z&@)BsSTTaME>~3uycGgF3UW+r>D}lb??u2K(*f15$&Zz1W+S;e5ukPfrG}4_o&8G< zSDP*}8Zc4>3$+-eY~Wm0tOGc37(IIDUd-aJ5;EsRi%2U+Zgj#P4y#tU1j8L9y4$7x)4Y66G;jQFie-Uir(BKV@XE7ZylEA*DD zw4im{+s8)?3COf!c-yrc`d+>@?dl{K<&!hRM{}c#FhI2}_9z5-^rfbKQ^sOgCBfl_ zfT~WR=;nA(rZPk!s5#ywg;CoWBRX{Zr&}WREDyg1r}_%Tjc9tF*)+<3Gc*PgU%M!K zV*k(qlA~FP15bCNz`5M@0gw9W1~V+`jC?^h7~d(iRQDNAQVQD1%`_~8ARE-D7uVv3 z+`$}$u24lg{fzxQ;x(iKM->|Y@4L%A zr-%>9@d?V?+|;wJ1!wPR%{boL5k_`g>-ng~O*HuYnPd)O2q7U}CwD`e3G z>j~}n`jTA~b9FlS{dQsD&XbW(e80;% z<|Va$(14t-=)gMA8Mp(b*~JlYNA65UC_&Dp-5R`|{NMipJ&vj$)P`6``b9>%TYoZ& z-1N>Hg?6kTxCSQ}luPId4@=gs8MYhlwbWKMo;6b7O@Mwq)XhI}(jA` z?Tv_h7(KU}{Yqp#p+T+ju5d4W1##d_hAu)YGb%?&x{F-fiNJc!CvAQD%@l;?*LVHF^#QbNQvBXO2Ft>qlui_>>gw%^v&)8P<)zYf zrugR0ALyd~qJiSQ=l$0sVd2Bj-hJM|f=gTJ`1W(876mzBpa)yri03(Oh@#6AAnT4k z+QCNNR=w??WlAs}InV&@^*N3A`NOevvtHMc86WA`UT;fdkz?7W_X`VTq!FF^yg1ze zQq+Rg#$j`fi>gE!FBGob*Y^#vJ_&EkI7c+;eGCEINHgt3Rf*beWqRHG4y$oeFPJc z{{|Hjl+NsiVHDuCoft9g$&^z$H+JAEOO_S>de1@;oNJ<@?4(hGCDpLy+(FFJfh4^D z)wPP)G=JY*M;V0%QL9j2AM*v@!Va9#-i=Mwvzb8;RdVKTc;xYJH#=mV_@9>aWT4Kp z&4_#BQjS5KoU4H=VLrTkmp1uXFDOU#?WSBPd(g8P8^^WIPeRyCvc-Q( zA8?=Kw2P5>;ns1K!)G7yb~z+U&&NeG@=P(WF*p+ZBa#8C518%6{<39`QGB3(Ou?ql zlmaW;u&uu4tgLYH9x$B(taC^|!aF>Hw9`Rno2?G;K(isjzv0ub%5^!M z0^k{m^8xm40aGr9v$R+J^9re8hY;!9!;3RS01eyNx#zs{-O7~2Zx3|-Hc5ep#T)@x 
z!ZxdPax-R#u+sIM+OYBO8K=;lbUW_MwKpfr!O@v^XDQmXgX8B$)U z^rgte>u;y0M$cPv`;2T}u=x_7kIeD;AAUpqEdu2~Ch9;0JbmfV z$h(85#@LtOa`TsiGYlJo(M!oH;LgkT0sI-@p+FZ+lrsWXOi?2GN3`Ied4qoyKqB!| z1|sF?hWHJvgP_SC4TU;nnZx{9^AYl#I?k^YMi^8Fcb1oeBq~>+(Me@(D5DkAl&#He z2U1miieAC%02ppy!7cs@5|AeBUHd-dg<^)hfN=eo`1Av1kl#`j8!&HG2)uX(GBv=f z6oBl46ZDv|v`79L$W(YcgR4O%2)J8U(;BjE>X})%&`i0l)M-B5EvX2GeoKru(3@jusAvPrrp@(T&IcrGIL_P4L6mkaUQLfoFG_dNj z(q!)fJh|ZE$|S9`vo|dlsLzo>+7lpEn>`L9K)emcAZ=as*uF^Vfmy%go7iEJ=PSQ) zZnaTpLh-@*1+)~-^9GK?eK3*6{u(hBIiU+cLEk9hTnsA__AHmXv) ze~da&^74T&RO}NHWII_xQI103oyr?W>MQpmpAKeCUM$u`tMupnSJCoh!KE*f3KUm!pyWT8ib#BgLd!<@+xGLb$JO2SX#Mg#+pU{Cb*eM8 z;Sq9IZ}itv9o&?LMcH)VTm%xZI*#tMdd_L8S%TEPJZ>1H13t0ZuYv^8{AzXQ$3H#={3 z1-{-{GAmniig1`QO~=8*o^-1?lczhPU!1soni*YEB+631dRvpvk%wIPI|^s24Y!?g zoN^Y;lO;wsoGW}ZFGUp%wE;BTzDa4OJhFuK5aOLwF34&=)zr?oABd-Kjq>P0*{Hh* z^=_1L*pR^-CP-Xyxavp`wr8}jJ2W%KhToo~1L`{~KR7ppP_>lEO=o(8P^!9%+tkLyUu;OP>_{Jov;Zm5x|C{g!Cvuh**$USj@F;`v$@D~#?Ul04gF9|>Hqkq-7ZUcb_D?^E zI|uQ9aJI?ms`Xyst}i;Toz&b^@BH)F)*Uz*d;7-#L2(XYNK4KewQKj~nzj_adzhFY zp%in?Xye|DjvG0xAEla)2EowIeiq38miKS$4)!Mi$r3zL);aYsq;01&v8hzZ!x~N>P(7G3`))aTO8gVDa zzWV}n*sMe8xlZ0b`UEf0;U6#4#g$3gRVx2}$Mk9sk<-x1+q(}b>>JB#fenh`t1H*! 
zh!cJ9$DY=Xd(y&uF$!eFTij~~sZv#pQ?BKJ*dHSB>^pF6Drlo}w8pbGiv#MD7K6^@ zH!Hc?wu~9{QW$?HlKkigz>i<=N0L%p>1c}bCO(7w@MQ^Jj$}s23pAZdz(-vM!{TF6 z=V8Ap*Dtd20g@pyR5AdR$Tu^NlQ#~2W7~i|ybN%5?j5Wn=bIA9PSNw8M|SwD$>kUv zTNOq#W0P4jL7-B5P>P%>;M1Bz_vKDv+A>V-Dl!*Uv$&go8q2^)XPjnfOmPk3!*;MA z-hN6pL#1KDUXyPq*^dVAVvm3}X)5}-y%vS;REyo3_PWRGyzE+i4S@jayLxhWm-3WC zS#BR-NGIBkxH4JkE-X@LojIx{wYf(lw87C2$#E?C9I5H8X{G>%b`3`KY`iu1EO4dK zID)z;lLIZ2C!k$0izBd1_NxJjbpxItlJg3fley|(p0vH~e(^cv;vbrQn#ryAO_;=T z#m}w|6bgjNA&YQ7P-44`oNOe`Q~MarO(kyM=#N{Uk)nIV(FlAJMT=715C-SjTUJrOr0D^QpR({Dt&@R*ev6W8QAwzN9*c#9G z;AJcP0vQ)X{nw_uZ2L06{KKKkCR%O(q?!=%iKt)jtP2=e336bgLt4XJiWQ;J=NN-x zrzurmUG=<)>;$b$rrs6UOP^%CI8X`jXmSX{%PmWwFRKpwLa3KFFKml@MAbp#LWcC8 zHBw=2=HpvdmNc^Tm!V&MyYi_u%KT3n+209qV;{$f_)U(~=*_6cDAiKFhyWr3!wVQE z=$se-vok)ugzdTO6E6l;Ch@7RVaUakt*iM;)zcu<$oZLcgo%TpO&oA)hy1t;PXl#>g5~Kq|b|7EO2}k!Xgsn z5pUuT)-A0rlwE*f|7@(p56i5T#K3HgAXn!)k_y<~UHqJHd5!)8(64jS{*8+V$`7P( zo|H+G_Bz(W1vz-z(P*!ongZJC_3n@g`tB%7J=$3M<%L!b9c#iO<6NoWa$RmFoky$G zA?H5t%O@z(%5yE}hd_fVlu!VI)2x;N@PYlUYha``5Iu!pj7O9lVlz&g@)Z>6`~R?Y zPfdboO#^1jwr$(CZQHhO+qP}nw!3Vz%btE?;)|J!^D}nt%=K_&qSw5#h}ov-YlI19o)S~vqV+AbdV+Y$Qd_*!hSz;3>gT_@j zy{UC`H^Aj51Qq@5a=u@}?yaGj&N_C!|s@9*LV0-Jz(^`s@1-h20DTN&)61kWehpl14!rtrgVb=xOA`|35R7j;bub-##<vLKHk1dSQ@S z^_sBr4>1ldAv85=JY#K*SR2m(^zE`5N6|T|cJiO46Xkng^=#pp9~YQsMAeh=Yd_8V z$tBRkA_DCegysWApTFWna`XA$?0p3U6HGud>`~$KXA0KluVtTa#_GW1SDiu0m=DA+ zeh(QAbAwexqIuOATuP6uhXGKv`Kco>wPSz-IoktdGgRopWo1;O;y!Xe3e~u4{4PBy0nlFNu!5P&{XJSOs|98T zuX?zjQuhM$OEEBIY$TnE z0k<$0nh3qf^^x<@`N%;>VzgusDYcw`*d!~1A$l~y1~*wPEWaOo@cE>X)caqh?P@vE z*Ux<~NyUnk6N>0q?E_G-D}^hIOd^9pN)kok7e%SpPE|OtBLckDv)c+LUUL3m9wD~X zUI4c^oP?O8)T}ObXEY$1LR0BRN8nnhY_fAhZ@Mw%tC5{D#r5&Fx%e2MssJgbI)^J76e|$;6sIh4 zB`Xtg;Z~l9S^tT~8z;3p`V_Q{m31B8QkTG@;6xXE-b=IWo?C$9zx) zOw{1Eq;`;1!N%<&rT3uyl2~^#Gwgk;=+k$DH3iCdMy) z&tDEgFMwJw?Wh6AF_JRlxI;JoBBce2V87V_>4kQ~j$1s#kcb9x^}7)cZRxU2;N$IgfO6;XtGE^n zQ4CYpw=BDM|K_{fMm;q;J~OM+w_3Q_MD$L3m)l*r&8iZ%m3h|~jr0X;pY+*4_E-92 
zW)3irO5mrCV5zXd3E_Ph4|;=B&FV{06#4vF06Du>h&b!(oZYcNR*2JO_q9DSfW^SBK=LIhc zn#Ho=l?Xo*L`<%J=;mS7WDQ`NCa2w2qnK^MeGU}K430!;4L!0Ruf44@ot1ptx# zCzJ{k`jo|onkM3+ywESrjW!|;M~!;yyB@GMoOFfeh*^|hP0Y_`H?+l3PE&FvRIC`nqoh5n<=icK*I7)?OA5TXB7awQk zrqr+FrsPDZ)9elL6ZbDU|jfVO;T{t|9=|nu) zv+F`Sg5jJ1>}H>syz8C5_wR*>gzp^Ou$%a(0^WyW2VM?q++gGB<`cn!Yz+Q^%(^Ps zcf&7c=Ali20|w#a3;{0AddEQNT>{_K`G<3oGLAHgsx}sX2pmC#SB&7(tEjp4NI(D`vW%>GwE0n6l?8xGW1H0gr`KXirBU z5oaFT)xIqEp0y^_&cR4O_6PvU*W+d=N4gmED(9SgJ_PXKnXs(I7I5Xq(MOy&Lc@m^ zchFGRP1QBj38g5To`Q{}%5M*@2%w0-%ft&jdmyjj>ns{p433 zFH^9dMDGxI1}U1XT%yoibf*v@9_s~>wHGGlk#kP7x0aRx=6%1I-avk{k5ATRyG=p^ z6j&c#+8XW!mRdV50?7^gX>lSfFwyciBTjriZYrr2I^2?bu{uq5j7@>ZqC6^{h->I1mp1z0 zv+T_`nFfzn`GYA}gaX?D-}?6Gq;+U3((&gqd`Pr%J->4{0macon?^kOpF8-8w$pu| zQxXjc4Fu#!0o9$jDl>s;0Zeq&P6wn=Zm{i&Tnc2)nOteHjT741S;7Z{;bI7d*Sa46 z%ErhK&1FxoyB?YC4-ny7*@^VmLtrDwJJ9K*I2uEHT*n7>f-kbzih##wt-)ZcAybN0 zWLk+#meLPpleY6$!K6#5V&ive_dUYkVJi(sf{0IoP8^gTIpt|(}eS8DX8!&_cbZMkHv{jmwHF2|sw z#o&laK};@XsxCem7?Wp2?NFncBUbvVICY$UMYX;_7_N>mrm}hIPX0udE-7}w&n}M+ zp75{8s}IzGg16cToGb+vdah=1%NI!rYTHM!Lc;m$I=H~7c#sTsHFDQm*ij!mhxF0l z6(jLGvdGMl0YHOTi8T%3D)cF?u%k>&r&KdMW>5$>zs66ae8T!+n1^ZykC?bqzPpPu zAVr8k=Z@jfa~G@*cloIwY*#|xrhJCVzm&EA!T3!@R0K;SRE~a605*!$2X)U3pK=3v z$*@R^Yj?U_!`@(Dr1?b6_<;zH&B|ZK;a*>%@0gk9ufItQmG5uMTDEzz5(wv0rrWsb zs2TDv4SCtWF2kJ>sB6p5c2kJyfYDG13u?!tMzff*o2DqN38pqWl;<2njqqtR&>zB> zM(?X&n|Nu9?mw`{dU;?-D(0$J#CW#qn#fdBs>;(bep?9tdVvwD;C68TF~Ae3Pgpg- zWzwj^p1Km5Y+N5RvfUsgx7qFFL82O#p%b`_5o+3Ip&ef7};~Oe; zB4fu`9%$N5e})5=tJBuN?xaFii*OC;lm^)Bxeh6fh^_>s*w_9FS5aB0DboWW^?|-D zyyNdMdar_Hra1KXziv_-@Pg?#CP7&LBO&Df;+2j6^o2|VeH+f*h{wYb%OQY4&wAa0 zsfKf{LQ%6(Tiy($1YFgcd@5Peb5l%%I(1lvg~#i{VvBpAGFf)bPDB7FM5*%94p!qD z{1?CNcw#AC_gOb*pn6mfq95%>|IMM$(OPzR!=m?u6Fk-JWcIg)9X>`x_fpPv4S@{^ z>ZaXQi0p~zLKUZX$lE6|AgERp5$^)Ku5SnR(==5-@ftQX*N2y&Pr=_$4T1^n}WdT6~`*?czPe1Cxtd&W(&5+m;UyjT1#c&}9Pg`TI(eD;7H(_644(3aM~llNUb?&vhl^?*EEyetUPoQY7w@_BbpYTSxNZ4|K0atFyIav3Ei9G3c>3l100|i#~~52`K4$Jdpt;^c@YI{911t 
zuL}^^pXej;dQU+pA?ggqC*hIUg_DM+5$ZlIa3%fSyPHq(CANn>iMUDxSjnvLfm#&d zfR*=NCl3o9nNj(J!4YP9(C2!TGg!7G-`)fWtt#fz?Bl24Nc`tf(wegw%AmY;YnQZ! zKhn^;KOwB7nSGR_G9rNZHv`CmW7E8JG%LRu+y0)Dkuw8fQVt99f5)cw)GV)lLAem#da%eQ(%7vZZB9O*lR+>G62X>k}csio?BzZ1-&VWW9`{ z<2u2fyD(8soy=jXN^PC*8NZ>FG2JyD-35hSUqK@GZHcU~1INvA(%Kpp#d0Qm2JU5` z_=WE+)=5-SUIdg*1) zWM-;|zt&`4vvK$N7 ztjq&ifA?ZHlTYRer3*~tQUhZl5bQNU|E+EAPWR%x;dIuU0w*BW%IFhi*cE(X#23Rf zL_ltk5?e0SC#yZ?4JY7>scfe3tG+5T40ipfgHmBKVf(!6Js1;zlv)Ja&977-FHg08 z93|2_z=^1Ph2c0Kh9oAJHRf&0O!|Wi`Ti{V@z%~21}%;{4@Ypl`?C3Lf}2$i_@gi& zW=)XZuY%-$xD@7dbjP)rGJ}(6Kl_TYyxUO>zRM5Lcs5^L*3b$i`h#KT7s33!YbFA4 z;Cqe{($4ZFpTuQi{4R*40!VHg#_21}s+0c;u4LXBisxgnq?`RoOv*IV0tCR6h%Y_w zEo;bOMpFc={oYhZoR{5kpH#zV3#!1GPt6W6X93uH8SS)gTeF8AnQX0o|JCpK{5AgD2%&OdeTo3;)>bg)`+3g!0o zixnmr|0G$r#jRz+p@NxiC~iSm?6Zj{@XcKxs*zYs279%$E2NYQaoD z#YP*P%r)hWb(|6;1ANp5FW#WIP6T)Q=Hd9YnIz(`HV^jy?@yC_6oEhs{NcTaU3yCT zJQcHHvnK;V=G}7CH4MPQ0X9aS-WPy3JWc)Gb}zu=7ugdxH!g^qdY)R*g>e;} z*nMJ)@8H@zK(iilNo~gfz&6ABe4xE{*wGEEK4G6u*QhNzQTZ(3w#4-~Q zEXHfk;8_0|cc7=#%1w{%Jma=V#s(wzJuEg4+E2w;1_)vq7d^zivh(~v zaR6z$ccZ{tN9Sk%i?LrNlL%M)glfB*Q}X~&edY78dCP9NIl1X?4Up}NR3!n(#cJae zbWyfy<2*G<(28kmu(BX49HKU?>h6A(bdOqXV+9!Iqw1J#8+(%w+t%;br$?h8lIf85 z|Fz}|kxCQ1dX$#zHgbA+v%z|fu~B>x#@&DWX&Z*b`kcBQ_~hqIn8qZXS6rTDj0e-o zP5t^=tYP6{+Mzp7y8W*V^(zz}|;y<#Z>dF|0rLa2g zUZPJkKlB%DR`_qnC7`J|(1#N!dmAN9!)KMEzQxfZ*TXrO|9LI06P;!ah-b==a~8F5 ze-Rr`xqLa9#g`i{7JBFK2p}^M-1;3jD(P$|M?jHWUG0No>wt@nfV6RbPhAB<6q)1_ z|ESW`nwNnDq`Ac)Js?sz*efnq4rL3aw(7Kuy^&d*iF~d|HJE+MEqF}Mk|F}1e#e>i zlPSw{&!Y#XH=y~Jgq6FKrdQJv%fg78w%zRhQmY63YS16XE&>W1RUF-J)~+atzrz#B z6~YVmI!Di*D6xZx0qDdffk#+E z2}Hsjyp=Ew&%heFvH^z0!w)-+ugxqMaF}VwLmgZi#+*2b7 zH#Mp3*$iPGMgc$%j+H@%OLL)*tIP&v=a$EWcyu0>Bnqlu-m2`2qNw#dO}UpdDEx%8 z;DUzq7Wr_~)fyqPU8#_jqD{zVoWKBG+kNM3V>O}CiQe^XF2#iY3gju+ZT(~w2D)(= z-HQS{oGLUu`J-dOk^dN~=g??UN1^b5bG7_ zxEMmQ0Z*@ERSn)CY3c@0=dn%@GO46;`EF{;o3(kvQYkhLLS<9ztwoPJ0Cif*e#Pvu zVVn|>%Aw~B#Xg?5y#Gd8D*Ry4SoV)uI#*0_3*J{A*H!Ql^HcEzFp8uhmq2$H3Oh=z 
zJpnvf$;6dROm+Bsa&PN|0u0Jepm83daVL-YXxP0#h5@bU!5(4rG2njZAx;0&?Xfx@ zwWK*ad%>pC9bPFxr2&%fn39F{(~Eqn{*iaxFYp?lpU@YkA5b5cLIV{>2-r=ugKbez zT8a=kf%6k_;%g!HR&Q@m@I`u;6-({CR<(Vtv_-}o z_K-`OU1P47Y?)3#RXgQgZlKj~FwERnHo9`qnxg|ZPv|6usjDbR- zw>ub3PUDE&h#0?E6fC19>)`A6fV09x7hAZ5Gz1jL25-<%g-FCgS^%ooYxf1SMxrc-w%5{ad_OfvtjNq+>#vRZ=hk>yLwkk1 zT=1lbVj=&1&q8F4Uf~>1*M%qfQu$e%WpYA)tbF1is$~+3mp*3LsxBp+v4N=)UV5*+1 z;Uy$3Vx)M3XI)EESiV`(V@|j+KtF&{dwQ^Cz}c_ch~Kk$+<5NQb|GKOlalXE?8^7d zdVd+n9<%6SSimZYC%7;Lyl0Q3OIq-DIpizBj}##is2A$o?>`EPl;^=K%}vz5{3R*R zpC23&hUtZKtCX}Yo19y|y1Race7mpfEl-!~Y_W}xY$j}Jvj-zq3D1)>7WPQAn$3?e zxjt2nk>~(DM0|p%C~6+f2b*L*-&xKXrxoWUxma;c8K;@2V6yWhw>#=Ewnmt6oy*}e z@Wwe&ax<{!;?*~r7AmWPDOB7n~jNB`lTbgCSYGr+FYoiCP_WJwz}8pW15|Vu0LlzJ=9s*xa&B zH7(#po1Q>5@==i|b-g3Yfo?rBhbp}=6Id_XdnIo_uWWH&MRYqMl$dNUqR^)dRT`sx zL22gLjYxm%h>m)M)RXx$>`P-1>@u3v4k?+k%>&AKSJZ%Z7KULnscy^e+MnMd|0>AL=C-uz~g}Xv#{sn1RSZ#u}6zX zPW#f>zoM5fWR2AOu)RM~+>)yZ4~xr!wh}H?(_D_dq1d4B_zot0VnzrMcY$ zQ!<*DYDflH&2<07kr~)D7FUdVh0xOp^UPCbTz5shBFiQl{i^!oJu7Tb+Ft}vB=ssJ>?EkCRU(sQ6_2JX3BFu z`Jnw9?61bX8KFdi`X3ab!`0Fslah9M!mXM3G)u3kacw-yzHE2o(5^Co^6ZH=Dr+;*0|ah1Qwy%8S0lcIAO)by!U*&tSUTmA8sYFns>9-aFb z7-`o-nsc3{btLaMr!60SjBX~q!iYTY z0D@d(aiw&W(J>7Al3DC(fr9zo0k`SS^LLn$nnPKYUgpT-*mTQLl@Ihmh*&|WtDG7h z95K{*hFGum&#Ya7s8OWI0b6DX6H2l?ppsuZX?f9}IN%q3$oh{Yp-Gdze`!Y$4szhX zN|tj=?rKXf1}!V=fXW%J`&HFc=%?hfMi$GPW~$i>uJU@Qyrcmf*tdS4Bqj^O>}bjn zVgJ(9b+*KjjM9HiiOvdCqTknxEZ>_W@sKxxML6A&+xAPx(1^-U1n|qma|al*BF7`x z(=k*f;#yTo1C~7PMZgLAAa40XwKTm=!T)tbH!#?J@;N1UxZfY-4F{IS5t*ps!~4Au zqw=yfJe1vx_ZxQ5GCCKF+`XpO)b|{P?z{08wcz?`;wgeqkQAKiP)E!ah=cAf^Az`L zLZ@BS+}=ExXEcC~d~DbH9QX%XoEd&fS<~^Z$1Y`va}{S)ZV$RJB|*3;vdz7}Co>~^ zc5f>bp~8TFrp+mw31cG>V0i8abl@27n@8YGAk+ky#8Nfk9l2Kc3Ae@)q;91F_v8^k zk!ZM+x)br2fed*B0_b3w?qJ`3>}lmF0YZ(36Qv&iuo&hRP@pI|fL zY>n1>57uWEE1ml({ZA``@PLa0M=bi1h^OmpGB@B*Huh<7d1O{)XG(oaTA6Y;s?wiW zj+EqHX=U{G_ZS0Wu3OXRzv!`}lB(#mp=sILp~VWtQXxx!A}xOOY?aZQ3n8<_YQORq za+n|CC3Bv)PMq4hvdM=huMvhC3Dc}CD*0*_d$e1443&JkL(Bz4Nxh#~6UF&M`HzZ9 
zxP6Avw}fT_KQp`?ITa!#>UtICc@CFn#S#@?tKh7>*4Af-jyyU}~AXBm(!aYN9cjP1DV_bmn2OIAc zcp)en-S+IfM9+^_yCaxPi;li7s(Ho{_Mo4+TG&u;!{SQd%Xn%4GZUHHjv)^@s55cn zrK6`@elJI(y#M%Kb#uR35{i;;&vxeS-W)@OQkD+&k2N{okL8%R9rWQ$zmXAc1qI-j zHnRT*{)65L?}fLqV6`J2_uule^x=ni0lI!0wds*2agzmwfdey=w!Z0EK`LJ>_~cR_ z&clAFtP5Ds+*D_uXu70n=TR)vZEleY=U6cEzn`z^hH5FIFeD*MpTwCoPS(3itxlJJ`NJ1mj(BI&e9`&`l4ejmWz74Jl@E)C(xk&y)8+ zz^QfJ>My*)=Nn6R6l#upy_k z3EvubLMR#6qpu&Ozn*Y`#xMD8LU(YcjebV!oJ7r5MMlAMw-lG5E%YQLSxKn`_x%G4I|udZYSUXk zH@@=apQXH@jfcPmMM&<7cb}#nfWVf)u1@a4WtrzrKQ#*4+@cnmv8^3*n_dLJS?gsQ z6&!l{+K1Hxp>U73r4W3*xlp{7vH{3jHb;9Z9sJXqo_htM6QU`Ab&M!8!HrGwXELmK z)je!+YRBBjL2`JjBd|OINxX#$BgvNea290TXz^wT8>$0yc&v01PvW?-o2)=;0yK6u;9~j^b;g`)k`2#R?rqQ6SC-7D*ZOzcK zw@sp|E^Kz0Fz|>IOpmD1>zc__fWVJSsa>@_ea=>flD$yn7qG%Gk@0Puq=dCI4IQXx z8tT?#5=em8)o4L#ZOjDid}R=H+ciiHg+zdzLz<19GGLD%0&|C2-bmM<&n5=KouP7m zVRqQm6zZz$JjJa1M4y(kf=zcL@G|TYl?T4z@9vG&)~ytJX95OR$U_R9U=atMtEK3 z5;TuEAsV>#>g0@FLu}<bxfJ*s%ZBAnWi%^T3<-CX6ULe(DSi7A|TlvEz&~13D9T5-v2(iC*c@1wclAPu0@ z(T3mgZId8Lza}IM(|B;`-4tb2eHM`lNk!y$r(@ zn2a?630=&ZN5ngwsdk0GUel5eIGDp>=*0fgj$t{Pu&V9?c$>HvD6{;l2u>lr$IN5j z-1ih9xDI%m&-lw^VCkz=0;n!N_BPl=mfu4)$i#lQ0@H!!C3F-dkJ-+Z^SR1Ld0}8B zGIbge{7@&iaWmYodb7fcS<4dK0n-m!^eL|VyCTzosJML#kRZy|;Gh0la7&$d){{sB z@tZv}>y!KgHd`-(2cMmwH#|I&M5k;PQVUTJ7pI~7plO+py7t~hb9Fv_LfS;aHwZ+I ztN*#cI_agyYe-H=)kOLf>MPocZYsOoO6>`(9zGR9pgf3qbB9m)w`zj%NtNyp$a~b- z$~jus^T|E)NbxMXic|#?nV3mMg+(4s7v*Qea~2X9mM1${vN!agKokB3-?KVoCGLw* zirA0+$pqfF&e+KCcF5*PqZ}#nItHgocP{*dL!C{o@0mYw9$IV%|u`R0C`LS>rUY~-qfgp6^{xP|+!7TrxsX1hDP%(?(8l%nYHKG6y=bj*! 
z=Ai1`5^OpTEGo)RI7tCNEXEnkhk1|BPqPJ0a|8E*?F>@r$Z1$?9JcBV*AN&rBt@fA znRPVVD0wcF9i91g!kpyLF}gv1Mc&}J7G7hZc2rTi=>Z)5fcLmn8upZb94eIi$gQNG_&5i8!slNFl8X#&UtW6S|e%g%l2HH)s$e8>F$FT2dwdVxh5db!GVv|w?v zBIFMCEQgLmzhfsRR0BWrV1!Ww{FI~xzKG|SJ z$1g|VRe~H^d1a|H$q>!d?+GV~u7;R;fqSkjLdB9D)!y9Oqbdl{bp2{~UIA8#O7DpG z+j!66K<9vkFvN*w3@^ztRn24WN-&xed|Ho>RuG@YhV;<%G*|NkmX$Z3Owim@?(kDp z`7t{elqL#xzQcWY|J>r%k6U$fL{Ok6P4s#7HFEU0fk@>S|JQ*Tm%hbS5=*h*$0<0~ zPfD{}mMkVN4a<)7tUNr(syV00t9RT=p3|qELnoj!qQs%6*Ampw>G31eiXrk0Q};H> zuy(HVk12`XS1qCVnsrWp#Yf@KM8IMIvl||G&wzc>FOlWTYOZeZX-)V*m&O|6P!dRt z(-2b5k~l6?num;mtVB^`C2%-Obo{{*x40T^pwkOhYg3$n{U0bpQi4TNtg5epkD=ES zO*eV!pA-@^zCbwnOWd0C#9!G56}evXC_s{Qwwf!Xe|kuYueevaq2Jys>KznBpx>I% zKYR=l6H#QeSKaZ2+z`Y}Nt=;5{ahICbXqc2Y@>iee}s)^3`gM=;CNw7#-pMGMKtv_ z3?7w(jcb_K0g4K{9EO#4TJEQ{=gsGv%!>p}U(B{&r$n9Nf`sY|=GS47a&F%b`ZKPt z_Di{~MsIR_#rqZj3_B}TOM~;}XuKs~l027Hm#^VsX8YYd>g1_eVa`5;WG8YvFro!; zq;iFWwR?*Fd|2+NTRjkyfY~YN+V%k`DIthOI>SOQ)`X!ji|4(7>|YI-*65 z9%^C!gD35sPP|jyq{tKL8txUSRy)_&)=-PBI?G-yXSP3!0Jl~j28VTt)V~H>`^QM( z8O-=B3y)8!7wo=}qrff@Nyklrwit^s8?emwspH~01;y5;53(6WStx_y1Y#>>z!FWh zZM_$*h;R|*Jd}_NR~deJVoc55CA&|;24oF5IA~xr%OPupLiC@G-9jxo3bItndoKf+ zjLqn~aCpa-O0ZjZwY(=={Ve~gAL!pSB`2IR!8=GZX#T+`Z=9AN3|*+suz%1h9!tZ- zq#wQpJsuRmegV+-Bg$675lE0e^k)crbDfKzNpn>x zcSBB_JHD|W;HY(gA=o>Ru;wIlAf2J3R&*&Tme-yS=_CsBF^@xr{SG>c7nle^3f!hx?i(U?c79FSafikX5b3}i@G0*(!&H>= zHp1`3idqtU2$K+;ty|G-=Sn=;6UNxJqJ7)_884TkBG`VF+hjN>p!D`6`*=RJaMYyG z=(NTDT2#NsyL)cHKJd$z+YItSQirRfd9(OzG=_!czgWr-@60D=`pWHZifK8Kt1RW^B{vhWsbv zi}BD5B)?h4hcM)*6o>QoXZiZmioH8?D!TUR@~)1@5(#nH;%iX&9Sd~<;UY#3xlJa< z!ZCpE2Mm3sY4r7W2}m3d--=&Ol{VL8bJr?#eg{oJR2Tt2CsVTk(7#}=)1<%KUw1!q zc$92h#8|VfG7Kvg0S6W8&Ej}py9!wynh_$D?T{T`{Tn!X z8Lr#}VIj7OAQ~7YrWw}Timz2r8F;)!xRrzoRZ_8{!)~!o)|y$iFnRa*u(J%lZ=91# z;%8^Szoy2pljUoPFg+3YvJ_P%{`~&8+2NGwFqCJ~N$|JIYbU{*ow-Y5ZV>8~aahP&e#Mo20 zyqMr_FR+M|@O6LD8v}Ru?s2+|Xn>ESqv&ZxCXn~Ot^04r({hXnPEq@-e1|na%zmCB zGm~5GFzfw{_xK(wWn+>{##TtL2JxJ)HG;?}AM}Eo*+*sX;nTb=*EWY`{y}o1j)E1f 
zn#c)OU~M}w0%qH9bl-^qs4Eq0zCm|^(hIzBkkB1DU{35ox{WceJ;DedrVMt8JY%}) zVI&i}GOxV4RpVARUXGq$laiuLU@()&D8uo>S=lyE)63ILc;lMIe%LOPz1vy}MyDm@ zbS&^~OUYHh|$sEzL|AiuusT)TCk)Z&J5?6b1nfV-izg&lvyz40sKlh(Y zAnTTN%e$aWGZ>sk45oeUC@Esvln-=vLfV|e^r1s-q zRI6lxS=A)`RBr(Q)xHqk3i0U@@)zwi6QC|#2Wgw6L&R|-Y0)b4V|Kn-7ZcoR)gkDMSRy}H-M z0l?vhF!Snpkzb4Tv`1iLIM`=F<90Y!F%m>SB_dc}40%?HH3J>>@cRvIi}b>jP}q5Ip!OmfpEL8n_aE`2J$63!G44oc$OfnFas$j;`y3$YvEJDlPV&BO`$d3%XgR){kU%3dwtyO7sL%6Jj35FWhuvg_j7r4)rg?(b z2W62cp3--n+y$7ZE=xG_uVNSNmYyD}q!e0NWLAgCcKUfNRX>2@%=W%uH(xjpVV?df zA8tTVTB)4RuWcW+0a7Lm)U1#1cuJz+?w~e>4#A3VgzBC9x5h<-!TNH;P4!m9rQ2GD zn=`4jae0TKrP8jv{$37=K9xB5>N-HSURA*bQEfr)6<0maE~Ow~wv)%Y@&wpMn5;W8 zf?M|$a8DJRvt>sah^ajQGU9q<%MSc_A$1x7XVNtXG=DcLJ|{Z>fkpzBCn5Hm4KYb1 z&DAlqBYQl;RZvdKzq3YuwqQokL586xJE3_#a}d2{Xeo*EeM$>Jt&%&0ohx9Ekszqs zL(9wY-zbl4bG9oxDEr3H;>~#!Q6tAJmgTWP$6gM>lP+q&oc((EA!rrxUPpo)zztq8 z6N~mj&+y0=FtGY4vwOapK@+?`d2Kz)8HVlPLpSZtiK37*1_+BHtM@=I3NQ*!Zp*s(N5}UqD0@7>FpiR$zEy*wV%^k9s}CqU z*-58g75L&-icWe zqO2_ZP8GQ}Yn-v#bOZ2y3EJ=RoqN!klYR6-LIFm!1$$u1h42@MA9{Dm+mrJo936m; zwYa$>NRVs%?YW`MmSY83P{Qgpo+5MyH<;CKO%#p+BYqiWLA&ercxJMr^Dco}n){#SdIZk{MuU<%dqBlG>eJmL8h= zp%nM{(@@PR@sc1<598cvUrJL-^O`Yh$HBd-1$ltDD1u5RTWW<)N4_~J(FUm!GD%dP zS%$ZA>Vwm8h|qarFVvx&MPCl-I2ZdMI6LVK#FPL174HWLUfIweSD5z?FPtwx-NYl< zPG{bvWfc!xchmax^EXZ6B>aUe(=wZ1R?f?DQ)pSc7WK3<`oia{qj~#FeC#lju$i~q z-4eK~+|5!!(;Obq<+M+ z2pQlBm`YgX;H0hxs$AFV=sPhW7`xKlHWFBcJSWGPq*o&MWF7O#pO}l}Fx&SFSU{T-3O<=|=bjq!7v-rG+y8dnLTI!^EJe_H{rXQmm^e|B zSGsywD|pYed8F|JgWUEti4s{g&d=0as7OjmiID6#qH|=Hr4% zJKNX7y$Xg0vitM{C3Zi{Hn`+CAtJ@AO z?yzHWy3VZ^TooFfn|ZWDcTYLJB-~2Q%dtbw2(z=ZqACufOrseFSyyMXe(NuGS%!Kh ztT_O~PQ4syMte}uFi(3^XlJB>@$Kj`KanAPmGm^;6=gu!(I8Qt_8=*Fop&8^Af-pr zmJr(Bls71^kVGp_lG6yR4hc=3U-@}hWo+U0^g8#UHJ;x;xv_#Vzqrj`?o@ zL3Y06R55Xsj2Z?JZ5+elU$8LcuhzqmlU*A*aemkO?+tamCX=!q3Y4mAkbLn?Wa)j6 zcehU<98LSi|Ck=8m>Uic=^5_lT`98Xa5;_M-eUHd`CRDXvPa*A;gDAKhi-cRCJsS| z{67F>jF{0HO9e^WZJq^mwcz&QId+q()WO_V7{Xap#ub>y^NNX~lOWxcLYG{=cX|@$L(9v<-sw;AKDc)zed6!E7 
zcj{FwJ^9qLvav;@O7JQpTze0PdK6;@9fdNIP7yinf3=zDRY|iHi~^m7vl4{b+Y>!@ zHz(V)8=E+RGx=mwWRU|Vb;o8n?(BMu?GI7~Pj>ASnBpO{3W!k764qwLqM5i=j=j3UM!SS3fxsvRmEtW72}UwTWAqJtBOVxbhuxzz@KT{ zF+2^J{ng3|FX9oL=o$vAZJVZ+rO|6L2-n;xPqJzZfM=}v;rGGZPoC-{5W*J;I!c`R zhY4+t^o`=@)0gl!)}8weuu=8t+dW7WY-LdPD%XhDrEP-YO7Un0Smv8)eYmt6cF;RX zOC|T|$U%9!n)r0wY*04}$lm;-%&UjF0D+O72U_zs*4r_L*B3a7QQ?116ytd^^{?rj z;bBkOT1siqnhjz`*X9lA7+6$wU5;EMm{x@Mvkg(%%Dzk)^-6 z;1k4?KM?clW3y9!@QGH#Wurdq%I7pMF~VO{J3F!yBA2 z_S##|W4@$U5Cj!9+JGh22-HW=z3|LW(@5tv3RM?`Tt~{Fav%7LOIJrGh)90~qXlM2 z1K9ENGtf{`Dxn)hkRMbHsP4s~pZgAz_O^RX4G8Fxop2NhhPBQRt!dAmz1{7FK@#;m zI3CpjY4^}ga(p-S*MVegt#pgdjhpG`ag8UTo!3bg)65i?zns)o)8e3|`UmqEC{STb z=)$zLzBAj!*VY@WAoTFyRM2|-{bj4AnLhhhIOaSBu@P48 zM#J%iz!et1@e}ha-tZtQ)W;H0a_1jupg`eMlBrla(pHKDjZc;T*PMLy^z)q$_qb$^ zGf;(2a;Rp5IFTk@aCN)wIjwKNb)>S##oO;*F#U#1db>?^Wmr++xsCj#((9M?1n{sm zV)Y3>5hpACW-9&7yn+c?S!wGsC6$)T272jaWO14M{OEK+5IJi1Ig)d^)F^TN4L=_$ zxRG@Gm4Z)M#A?XB3J)-rtO;KEHET7`1R^#2nj+VenNuzYW?^Kw?9n4u;Xvr2AZw0% z5ME73+F?YXwRv%N;LegK4TmYB(q_JkNTygB3f$H}o}mDgB4uf9qj5aP3|k-EA2-Eb zDKx*ChU9A3QzABMyik08@NxDWV`1!bB+m_OYAtb^;Ve_TJ+Wv6d&r|TTe-#&=KlKQ ztHv~9R|M@ZUq+&T43YG#sqT9~azATu9{9o%@gHa0UZL6qr*yW%fhcK(;E)8}6pu5B znF8|~SdLGiMA5gi%r0f0qD7So!3vB@hn&70Jk&n>BtLAvT*Du=u+mFd5h^feA9TMZ zQv0%PaSaV8RRdSNv~Z&YDmGQs-QsC%OEkTFf#KVfq<^mRxQIVj zjipll5MI+R<-BO0P%^@3acH-npb?0$Ta5YXE+VwBSEUyB&YU-bPu%accRYQc*x1~; z+~f#lP4xUa8=zQ{mItmWG);7^VxnfHeU2fnnb65_Ror` zc~8wo=AMF&Jbp_(jcyIH5z{D|WsgUJM3b4Y=o+-Yz&gM7JsT)S>gv`5*sQ#mpYCBz zL4Cu-J{H&XttZKor?OHH5mc${4VAvZkNaSeQ1#0W126u6yu5yOaB_FCE1U!oKv_oa z`xFsKr5Cmfx6>kFCEmDKG-ILR(iru0fsew=vII5WbSSNqmRGF6&m^=8>VQqKAr{T! 
zMHiX&f{=tKsAMB4I+vJPth^;Le;}~0g{RQ9d1o4j3T+gxVz;8w%t3e0eu&>@p{(s6 zrz$l$0SwJHi-Ny5r6bkS+;D!R4L};gm3Bm*Fs_kFx)n>|mr;ix0G9Kp(;;q7CjcktTY<*dECLp5u~CLMg*!he2v zg@|JIbXXKw%5x)g8p!^)LUe*FrXYrS^$q(No55L`iOfAo5LSP;?=0g$Con|4zMt7p zr4Fb>uwEul)nITg8Ak}8Z>;DQ-blu6m|(Wl`hGs?IqvD&koOuJJ)gxDc~LjnY3bB1 zkDt=ipuq>2>#J3b6;kvt$AShQa^aw1i>u8762swJ_uuFjOh~E~BaDV9%3tOp>pw^zf*M;$%jcLXJ?IcVR3;IFRS@eMy|yYie{I_Z6?C^iB32 zuW4=JKB%%=B|3GT@wGfg)=Es|D6X8C#q#UOx+><|+F!B?mUC4c`|=AV-}i9mc% zf$M{vOA6bM0JSyTYfQdZSXvI;C_J+JrcXOt6PX?{ND!do1v?Uk{Z{m%>t&x09y_qM zmt8={AOv0BTw-H;6nu4us9c)I=iIpPvS|Qm`FfnM<31@B3J~4r{j%wng3t^fyjD)R z)(522aMP%ob!4jKB@Na*MJctCx*CLpTVXMZYwEfN()hq33ML(*pt9hnRzc&WcpV#_ zXVJnz3c{R76YB^shz|$>-%PaG1B2HdJO4P{j`<`q6T|g!`(MX?ipg)ikg_L=+OFw| zEU#>)KkTs;4$yOyd@0yu_ig8f$N^#=(w+&Ht3Ymp@!tHez4y;~ebtJcC6tWDakCyQ zMfXyhj-^@yh%6e%e}9(EjAYT&Q0BhMt$|Mtc;c$bRzAkzX3_%weXWBmGbT$Jw=5>r zlGy;$;z#|YJQNke(}|!8%C1YuaFh4}OdVFX00)}ds*`x%nbU1s*Jw%`4in8TOJKy&3l}_F$6tzl;^oJVvhCM@HG5RkUz-~!rg&@R55VF}@`02><(HvXB z#igK&lqD#%{rTi7Uu`O|A2j>k8EtZ@dm|7)TF0U3dOw5O!bC^#9ULh!{;08=W%$X-fmDLrGlCw{n&X8 z-2!^)-_U}9#X%jeZm9N8uhqcHsexmA-ojVE1=2I#Ch3r35MIWm$lenhS)PH=STX3P zi(K%uP35xL;*(csv4B&5d@P-&cF?1qMqFSL@7LL0tH+Hs61UB*Zf6n>YEZ9Nf9{wu}id73jWc%wB|8;?C zl~A`{oLf^X`S~1vzH;iKsp>LCBMJx+3!`18%GW>s{bttig_Yp)CaqX8NBDB~{@CU)!vD8Xf z0So9+DIBp5SfyCjL=Iv1EHasCh(j-@%w3>83gm+9wfCyOmr2h{Tj!92;A6+-4x~kB zPf90oWl<`$_@;{_;6<$``J-VDjt5hHgXb3OyCl(_=x+l#~oo!$lUaUeOxBd8C z=Otl-PTla+83He;AsKVZ!tH>_uTUM33$QChd#|ek>tQI-ADqOWB&P zvZjoRNcqh|qQcqa70N8IeK#8zAj%HhAfof!_S9TBC-|6lO4c<4vJd?5+;;0TRCKol zp?VH&FM(WNwMdKZFyo}g7O^W0AZz2Ss<(dhp(V-S4Pc?Rc07>`Gdg`P z9iso~q+`n_Cx~(C9*BG55bE)U*y$pZ+jeDQnZ5(Lj2|WGrg82f^cbTUfkfgd3s0S| zokDaQN`&bq&`xIKJkSmR63jAL@ zhyP_2&2(7t7t1prfws2mSkKnDT;-`Q^yrDH>J9oGw6?}=HuubS;)AGk{J3jjCOT)LM7@3Wfc-S66TJ822!VT#!{a3jenKYV_FQMo^eAdW3 zV=mrI)k84GIe!myhEi26?Su9Q%b*2XGaoA<2? 
zp!gL8l&8fdnRqsc&9osJ(Lm)QIsM;U){XU|stY$vy5(N+v>Vz1*LQ}8!YsFw5UBtQ zs{x7?gRB#Ek+l*p3(lEcolfM23#uc{gr28PI$+s;=>UUssnWnIW8&l=K9xXQWmiC< zI>KFFvwYEvW^#Hrqgb(MKfMP%)$~zll^{Ed6)8JqL_mPHD&5?s{YTa+hn=NTtj7FZ zjAySg-r}_Ihmg@{q#w9@I>yX?^;8`og82Sc^M>Ek@#k=yR-L(+(2SK6BZJ5I5>z(L zS=w=qd zpsFlZIm!+Le0?NR&N`0X!OmN2p>16qfM2mfl|u8Qr$z^#=|U?NT&B-dLK#3X3G^N zJf*K=VtJ^rBy|)+RNP-W{H`@s9b;+SL5k# z7v3^YlT{Tg(u{k228y*4rcZq}m=0TqG2ni1`vBR1b6 z%`{s?{FcyxCi-slt6ay{2~F1#eGqG&*kSu@%Tp%y1YQQOJ$$?$#F&q7kOdZ!TViIY z(20DoW`qr3@m0rJtA#WK@v#nb-@$K#hUWIzHiK>ZXzgA5yvf=NF5v}vkw=WxhtP#~ z?^{ZgumSOy<2mk5ZJjX$#68H!8|3OT4m-(QL!RcBM@L?v?EnBNpnIsvDSP;^uomsg z_zRQU2Fd&Kv~K|cEXhtUvkV?Y5s{9GCO2ONfmjv-q6YnWy6Ui8&s%}noYPGdEMR$2 zYRSt^OTpK|B#Mu2T6UHGn+g=YaMhtn4Ej4veG7+_;CTNPq!piJOvu=r)U#fej^0a+ zF2O%|objZp>nNjXv|x|ie*dh_$ph)D%WB`G>j$hhL_NWpSq~_ywuPs+AOOd*+W%rU z&37|~<6`W5u^3BSTcOm7iX!s=hO2-SJHW^tT65(g11cq`y+0BhjVK+zxx&4lY@LB& z0;F7kJ0IgJ2+7r4Z}4Ad!+loIu{6-4gZ`*&a{QTEE5CwK865e#~ zcV^tpn=tWl-R3#8REW5c8EVbpq&sa$tHbr;KG^&iiM0AS|+%jEfz`3#dS zu3~4v8M)pX68MrRv)k!mbeHU$4Wmc~Gmy2Z2NPnO2JQ(JJ(#YOH$AL}5`G;Q_yp_> z97eK4yA!tEJ~dW04o#(Dgua${(^Py#oSj=+ir(!oGOOG>q~n13Gs@Z0|_AjDHK}shMT_M*db$Vr@wi%xWJNL$i964!4gQM z(VLgcNBDcROGsXGu`Vi3`ho&9= zF$1p+ua882G~s&bR#9MyT&UH-8Oi2JiJxfh zlv>$g`f$whsaB3UNB>zP10H+IuR0DEg@u>1>Gsl3EJVg!Vh_jxxu7>yW{&HW zC+0D!B`)eEyh{&;pfvTub)cMPnSPc`qx^o#=^=`dTix4p+}LE$pP0kDUr+AJDvv1d zDK$8LG7!%iB!1M6T^d0g2B2I~F`Ek?Rs+x%P`qe7-a>2K`jI&2#^kXWbiGrQCPB9? 
zT4tB+uWZ|0wr$(CZQJa!ZQJOwZCkhhz4tkH--nwo8EeMKrN|Kvk-65KGS!m`#bY-u z6!;C+tif;DFyF|W0JXnH@~ zP4-V-O*`s)pa#M)e2{6iF%B)F3do7K>Q07H7CcBy;@e!qe;xi~AZ1hk2AbkQyC5&V zGbb|nt<898jS_Ezl~L}QRch+F`!VDk@=CR9zkr5~kVC2z_1FtO@1f2WqInro0jL-Z z?=jVBG@8=*Wb^7btN5mBC)$yChRYb5hnKIM?qB9LhP03=EQrq_IobH>QKXvG7KCH- zi6UREWS9O-+K0FaIYpkaVtJTGrbd_S4b)s?Azco=AhOx)Uj!PqzlJb|9$UbievJ7j zi$^|tW*>^&u!K{+2v(rp&fPy$`ZP7K6pztR*+V!-B=yT_Yh%{sJfIOfZ2UM68{cLSNcmg* z;dHE8m1f(SKaxPJu(%p{>u8(QpTtZ_Zh=~=6eiwyfrsEIP5&w%4}cmGMI_{X3g@p|r6 zfMs|v>D$93HdD}@D74aZ3HMA%TYshdm#`H1NmT8bvgo6vSBrne^LJE$p`0^G5Ndu* z7NXP5gN(q;=xhfRioCJAPJ*?y8yJmRbgqA>b-nUa0j6JNuxNvh?jUcL^7mX8Y?J z-J$^7;4SteZX{V+6zb^#W6wVh*WCvc9;qM|4zvJpEVw(aTh?U~5T2ia} z*mKS7FKOaXBQb6yr58}^i5rlJKwJrWV|Vx%VSz+vfg~SStbVd~HKyzK1vNa%9syA&ZH^3OAOh+>iW!DKcq*p(dku>H1j@ zW(J0r1*LOBp%(>>degdxH5yQQ?#&;>jFBQAGTWhOPh)VLwSm%Ni56*4;8HGEK|^2E zJFY(()Hu*L%RHIT?T|-xC&5=JxuBQUBpbs>K9<^p!a^zkJtDyIA&*yhZ~JZeD}@ag z&R_S~JA8eYO0>EjP_79=gIAbf8UWMPBHu2*Jkmt(}ZHgRcbX&`{bD zb-!olq&-!^;fLq}<9vIUVE}T2i6MKeB4HA3)*v@(WOa_t|bf z2t_ZOX^VL)qjFcBEu1*(d&mX_866S1O8ld~F^TM%9p1h3#c;~4heZC`>2TKyYd>}W z6%%?kcNXYd#|RAr`uC|U0ml)N!c5^&ZiT-`3n~{UGwu7~KxZC(?&+Wr>(OOh}0mZ_it`pauDvO^`IM1ak?fb zY&>YcR{m&gEul-atnlh)f^mI)4tsTW;>9ybCW~wSM=O^cF$|fyh_>-jh;0cJ7BmJd z&HX0E3H#tEMk}J|_Vb>3nSG}0$!#KPX~v(+nA0b0lTt=uNKXjA4ee*cnBvbextAfz)Ph`JqBJ0=ah~OD{hQll zGtj6%1H6exNCwcq*7w^;={BP|Dg&t+r0RCfcLFSe=I?I_G$MtG@URxmM`tO#L(?;_ z!9rhFMViTO3@EZ4fme00!dduaTK?>nyz`wTq$YF64(1w`4G%GVdsl#4oJ0zTGWwj# zw^IU5a5pasUQ~3BA!u*I<#XadB9IKa`NrrhaokH!CFGRz&?-j~d3;2ZtH$EumT94y zHqF>)1b0)(*VwiG#DlfLG^d$7`RP^Q1pG$W45$l=6v>lcL9xfHDo$0O8Hg^7e2x~+ zD-SwgL+Fl(=+f#>lir=Ljh>A~!_TcJXZ(>Mdj;_!_qQ2b!IJ0StP8PR9&WNB#mz88 zGl-3kM-R5)6!94G8K6s(ChFC22M3Uoe=1()PWhxe-xMj^ttIY2mpaMUSDE)&DqR4I z)84CIhGKFeR|~%G=RIj22pGrNA{!5r2S=j7$m4P^ zDm)9m3`LFgpUd1MD>7{4_(G1}Lk5k_;t&ifF8xQL#)~sF^yP!d6$RP&_1H&)(edY%e>k@k_+aaO08QZ%c&!jNqecSL)l6tZhY zWsA;=5Q(erNbeUj%|?TL@|jCyS$-PW??Oi$O8wky)32G*X;TzihiVVRT1(x|gBoxC 
zFzJ!fbEO(ARUbnT07^51^BH{JBJA7Kx%@%dp=k1x*@o5VvF`obVne1j^rl|3lI+V3 zTX^A-LOe$h9xiu#)zPV2!2_SRQGEIf?xcpteMLv(PZV!=@@_K&pZU#IOJ)EH*(IYc zXJBVKWfeAhQywA@P0Yd^5N-tRQ9|kPxU@Z(rzsG>(?p*B&S;w$$@`yRT)TJRTVl%V zu?ydCWg{j-bagv%x(3@C!ny}}R9nt0dPRFaw`)gIXQD4^i3>FedD9&aO(frc(jF*Y zQ4honprp>e*o})@jr>koQSc6%@c#Td+=tB|dvR!vb186LmZWv^D-r%gR8t^ z_FdM+0qxeda;9TYx%>17v@1LW7}T^)n;gA*Z+P9DIQw`4p(Gr`?ePcF63Hl*?$K>H6?{})(dhDjrR(Z1NFmBaq#yZh~I8&`fU#x6z& zMzxobJU~(S(O*2$7~q0KA!!Fo^Eg@}kp6H0FQ#4Y zENDp^Co6({Tb*+rGTAZB*;;#l%)FA8SQ<(J3lm;5%7{S3wjdmArIYO z*JC_hu_7^-=RrO{qXxfk_}2HTnA<|sVIq;CfIlrv6@hg+Wc%H99bdXa&3D_FTLHTm z?1Xc*Md`v*4=n$zvDD;!R$k@No8JH^6|zR(kdz_3oqi=gV#+CNf+ec zK+!5yVl{t>26;T9tgvDR*8C)V$y`alLa#r@fQDa#rI)&Kk*tc!)30dP*t%IqUT!nN z2A#+Bgi6Lu0R!3Nq>@ONfs?~7;Ms^@8~xE@D+IlZyLwj^PEA<|%Q>wC?P}~O^HY8f zN0N6!4s_TylYS}FIRl0?68l%9E>m(rjgeNAheLr7x2}_55q{32f8JmZmMJ_Dh8@8)Z398bH6xv|jQ@6?}cP#Qg5SX|`LF6YH1$CK!IwLVb^!Ee7|7k9>+- z)@fk=Cj0aSp2ZT^;a|-D1%KKM7*x1dzK6!HsC4}kkv9hvGd4s1WKyFCvkDOK9`tLZ z@NLx|ACR@Hv>Ep>d#iv9X0Q`s$&PN!<-=iRyBFU5KeJ}V=fC;x<8UPQ;THIn=tYAw z+q0+N5K&!_Kb#{y_E2Z@#pcXenCb8&Y9gt}1va$vR;-vx8LBsp84sAS zU#|eLA18KccGl;*kQb^vVYD?0I`^jEsaukDk7U5P&tfLCwepn>hwMXG(*H%F zqeB!-2T!QSmzxg+ryh^N@O)64}!HCF$R+8&)4bXtv&X9B2Mtz1%K;-Eyef}X0BQWFa*i}x zw!FG|UwRQT{habAjFQSSc3XW*Cj8xkGkwZA2495hMYobaFw)4@(+Tt->_Er_L-ExK zQoHutfx~T;aTI+8uo#KrD|fkmY59?5$eE83vY*=xid}%w)xz_(iUo*9%jh$Gh{YB3 zmjTpTrVldoK(EhIEHn-jKhEuknAKJ<-L?d`J3&7b2(qt@;Kq5R>7>840~v!Wze zUch488-2=dynbYRpqujZ24ZZO9p0T7u>#Y<_lExnh3HUZ#a6-FP^Ov^kcepqnid_d zX{6oB`DL__jgWwz5>%}p9$#Gh!AjokD977xIE=q9%mNYe&sUj|33a(U5z=gX4f#b_ z$OlEB-`P;`;377J62AGE(ig`6-0Z=t9T8vI0Dt6;tI_?PyaW3x=EeQNnqbj*e#=%3 zeX2>dk!gtJW|gQ^qSTXN3*mCkJW$+0@q{BO@y?EE$EFp@w*PXh!MpkU!XT-malDf5 z*F- zt6GZE{edh03m*?1&%P%knk@VkDNJXszVq;BM6G80OXO*zPNx~Mv7YO@7P?FTDM@n# zL*E`r1tiFs)l2W3-a2x&GjiYb%tiW41<2*-Lswowsw4CQ{8ABh)`Z%K*_OCiaUfZb zb{_b*ep|9g;2dneUf~;o*Bd)>H!Ti~QtQ`2uJrGk75Ea%axdIBJhs!jh-hi}b@tz7 
zy*@DMy`HHUX)vFw-Mjwqtp#w+%XrB_y4(-S5UeqTMK=9}KJH@lqTilV-L`NRY_1Qnfnw7-_WwlexneduR+urG>s+xz~+TMPY~(JGg|AVmH8;d_0EN` zJGe`ho4mUoMmmKZZi75C?=89Wturh5gpC|8|w z{CcM{MlQtGv@Lu&3c7@XzdK>vVGy={7y~T$*7|FXFL@@ZRBv*e;Vwy+R=J1qX54@) z%-3{+08VgvV)F_+VLZ4qRn*Ljc&X4BHfArSd)vVQcL0?ZuR1?|T4T)&{=^)Uhb!17 z*-)MPvw5E6Pb<7ivc7k_A!?GK+y&&_qk*IlUTC`qdVLKNQxb4sDqrk$$s9VU;tA9! zQZ_`@W;)7K4&O8;htkIBgJ-|4;S{J>bSZt4PN&0nd^al7YR7Z3x^nXs3ygTrvWXuBp<`iDA{ z#KmQs6H9@LJ2sx^VQ=gyMv&x98k)+4ZRn{0$k4=>t zw^7_ntEdMtuEOCcpL@t&uPz*itI^4-DI7^8%P1W||5b79B+gPnj*);i&2NDCuvRl( z6wwG|R?7?b8t-xV1h8;uGLu5rJYc<#?y6dvj~+a{!vqdmZHUS?X@N4@G1TCXZ5h95 zrv*zLTxBx_eWTY|E^uI%LJY<738M)<7SP8_d1Ns+Y>cw-5u49n!i5AbsmgKFwUw7A z3N_Uz=2lX{;$L4{6D$;QNJDPp*|4Ud&ZjL!pd{n= zZT$3s-`NtPUGbktA${!uXBy|;lEd=@>aAc3@Q~;Olr}t4wulEBLl)`#Ja1Pa)z`TG zy4Fp#r%ER=W~!alW?zbN!Vby!EQSK9RYeUpy+2Gj)K4e>9#{1>3SSVG{wA! z2VyJrk}?f(t0{#FcUhZS0e>1D$vds|@w`dDUs>=5^xc=A-k>S8K2*^e3VlD;Jg(Y!2p4KQPbWm$TIr=|Ad?MMkwJ^R^xP0(sR`-Jt*Q$uRh<+4lGuq!w z>!TI}wf2`oiNv&L_|d_Cnd-Jeq(Gs}a#$fY=X$lr_$Gn; zW$$l;pCW>p^~_rST@aAhp)sH&{vFu>$My)SO2tRo0+KP8oyD)~3?m4C?jleNh;Xd; z1?|<8VS*PF?9^h$M>@L|Hwc~u*ug*;eGfe1@up#p26?7K#WEjX;1pgT%)KT zZ)m_p_DRiN-~61sfv@IiW=@ZZuflVloG$~;W3dj&wt(H$wIaPb!SZX8^lP?gA6K_e zXgw}(vifhr9PohxlZQYa7epb?L_IK4vSNUSk+!iIYZSNxqyI3}jeOg)vx9ekx48Ka z9;(YxUD-^ii|*LOr-)X*|GgBEC>KrOY9p5_U8?slu>tIrbpB4@U^V8$Mp#uRB)%E0N-)zoanOP zA&V>!){H}>Wm>HSZ|8=41hGOxM-&3tqw*HB@s-o*T8 zofeMs$Je|^>RS%SH!3Jf?ciP7KV51h)cQX=Wvf8}60p};CsZdMIFwyxYB0NL;IBxZ zxwCX4O#+A_9k38$w&D(j{2W+*k`-BC2I`41(+Vt+@ZK((rkLg`U#SBMU+BE!$&?dN zCwKjD>w<=-^Ovn00zVAxYs&l%4f_(vs$>mnJI8*?+M&dEhq>@OAirU12dV}<$n5E4 z|2{r5>g2_DrTGf7Ct6-W@0~sf2+F5Fj`MK8#AYrL!FK;H)jOqdTsudghAwr9)#>Dp z7r9Pv!z_O}X3syyC%zhD;UU~N`n|y5tJ^O7+kL z$rfb)9i4+*P6cLB4NyfhPI_#Pv#OjW|bxqf+05QRDQRl}GPkX`HyZdVqGu)nV$SrNvO-m?~il7T$xoWA3wUu!YnYp%m@R*Kx=h2vSVOSY&>x(}Vk?dhGr8!I(H4-V35WjFq`#JD3F2k`-CL<=KM| zbfC<~Wd%@%$;2TK+De|M;6F%p*(B>?*&z`dJbLkW8s14DVaL)E36Kw>aq8P0p^+nb z5#;7cPhW|2{Z*>q5z4y*?2kC|%(X{J24O{Y6C`?Mf9ukUey_ktO)+qGw4q(O^y!9p 
zZdQ(J1{)*_n1*3_v{jjo@@X#gaw%6_j%1a5K zc?=8wz6!HF$Y~qTWM zE4;;9DY&5n2fTEpylTr(2H6#EewB^)@JbLvqUKi{3auBq=PZp(C}^^@dHwWPE3Eli|2vAO(QwPvC>`w&u? z4hG0G>66PTd>X~BX@sd^$vhLb6L1%8Zjmj*JgYuJ*q#}?{WEVf43obDJQ*Vt>SO6U z1?9cOd%3uk+Z+(>@?*SsG1L3NsNN#Gyde!*b5|{1OlVu~28>VhBEo;^ydmZ!N1v># zZ-9t$XF|Ygm3PV}E!aO#gv4X=kKEE4+h~zGw@dkC{te}t8OmkIpbfa zLK!E~kiToSO+Cr+hJ}wkFrhJr|6XD)>*W}V*7{bQczuj&G-pe}|xO5xeRVs2Xj7CTl?CY;B25QgoTNh3>#`(2r+J&*uQS?-A zyuR7J=ba9PDTjo9tB!K>^Owfk_PcVCIMyO?`eDIS%SbZ^(8t1yzm=}+BLzZwmmi_X ztGYo<>U<+j`0jRXho}juz?C{)_TG}*dHd__g3Gih$eK)y@XVu64%RMz7h1rZ#d<0O z_nK~3Y#A0T86JAHW5I$2vgtjN!AR#Xu{+Kxenp{Uq13CYznyp{>eoh6@PS*m5)eSU z&zYT5#c^@SP2*+SlAAVn|2|y%KAZQ3!{2Y8s)>5r+o0616u7|IB--r?NuD*69(^`3 z7&Arc)8Ak6h*o*oWgYS~)t*sWN-spl^VH)fVpTM2Yptej6}Hl+v=#?CX$?FfQd98BYLk0n)j(?{`i}?%ltY^em z(C`&b6(kgKD*ZO*N}$1U`pbiv6vAlaW^jPe&J&n>Cuz<0uvnZPIM60N$DLO0KU$tm z&L7iFWtW*`cz2XfO{X_MEeVd{@{Hwxpd;@@9NlyLnGf`$uZ zwA2e*@e7<2w^%hZcr<%cQu&Ya3OAeJf#@dCpw_%DF4?Cc1753cR}+pGY?UTS9us#3 zmb+i>B}z-*=yr++O%4!)J=+XMJ|A<<=zJyYher7*vTE|zVsxR*B0HUxEs`CxN&n6o zYA5Eihec2uWHMtT$+scDDMSc}`nYc6{z@aScsOi*dzHR(Yunv?J6TxQ_VUwdSuHZT zo(OQ;f$xF85HU_5ZJ)e3N(k2?B7;&4gTFFvYr`>McMfwE4F_1nO94v*po-Z zSxm<1C(SZY69To!)lq~{#v>cJ8~kJ7BmX3sT0EvAVdR1{Ms|tAE-uDiA{flU?Da3u zUu^hpFU&=)-2~ZV_e;@C_t+OKL*7!^#*M|ON@j%Adv(t&NZNx2;>M%L4how;hqPWs zYLP%m@glhFRSxoP5wX=b(7KfAP6=H&ZucS5^e#iSWcbu=ycVl+(4$(Akz2jeX_d`R zA=nEWm;A&1k7H^xzXq>%CufhcxEY+iTJ4sJ{g+8{cqJyS25$S+8_pzav^c(X5&!;0@@^8SpJv?utOmF5DQ@lmNjARzQ{|pNi)i9~|GMj|n>vT{09myZ zuV+2F9u_~q^Y-9s#j^cEu0(Hzs@iz<=)vQs7+3b$ZzL>Z(+t_j=$23fSHOt0Bi@=W z;(5tQ%TaucG)48vyU(hhsz&+gwylvlG}~&?ZH-;q8e)ETuKzrq_*0rOcznsqf^U-G zgidnU%+%b+;J(qz>WFr(VTgncjDUuiB-t}5`wHz6ga0X}Afd~-zOS*Mf^!c0H%#Vb zTn;yp{9IgTaBcydZ~wxB#s{>E61|_1Sptc|TgrHBC5sh*7$Y|36J7JF2>I83>U?cF zArvv_=%EX!*D0+?b+HcbyVT2vl$LceB={o?Ru| zxFMySrfaet75C)&Oxjqp7bxV)l}Dsbsgx6`n$19onbjd{fwvqj?Go+2uSp-W$M3qyanONZOSMCg0w`pJB5 zbhuC*A+^9hDY+|+*JQXwT)|SON+;TyuA~N({N>cRNGgp80+iLPoRv-T`9^`X+@(Al 
ztY0pnP%)f(v5>#*fS#EcZkcy~>My2p*1$WG#PzhKBV%yUX+GXVr{S+bHkaMRh08?+ zCqN|`U1wBXN?nRkGAajTG#$g3E(VjL0p3krmRjGhQ3Os$P81+LX7IUbe)uIV>uhUU zgb~nPZ0JMTkTwkmjM!PkUIWj0k>n7*MV#oJ{#rs8oyx|U>Nbc{lCYrRcWp^)l|y$!Hy){7X)dkqa4SMGbe#;f(D&sa7ctbp7bhy_&uiSYp z8g3g-Hi3=m^Fnkq|MdZhBS`HLAP(j;KF90ps4QO_zLSYCZHo@<9=qA z%C?IM{U_CG>q~ZB4%im5OZ;2Q2=Ipon8k7((nXjYNU*jEHaoWLnz=Nv4J~Q{GQc=< zx@nSLqy@_Kck@jVaETMg#qHL>gPQtot%a;wDEj;AeB zn%;2sQk+|)+Re>WmBMr0;K8Hvyt+?!o#a8bofFn;G7hVqch_oa-Mq`y5K1A9REwXx zhlR`^;oq3{I^(q(cByv%h^gzHLw}kbtH1kDz7nOVX-mCYp8V{`VlcD~k}^q8GZ}s2 zRNtR;QyP_8qI?<4j^I7DKCKI*R<53u5LBx2Jh)T@C?3^cwnEHQmyWJJ6kKm_&&Sp+ ze$cddklk?Amw4Pp%yP~!BtN8{OyoALF2|mx<6?bry1cdm`aC<`ES_8OJwG{nj^Fqs zz9F1?XTQ6F4L;)3O<|96UGq%qc}RV}mA>5Q{oXKZ&?J2E{B{+00?mEK*I)`!l-VHO zZqu|QQhWHxNz`A5God6Lu$D8MEAijPExB>|>N*Z@UfQ5{8o#}JP&FK+g!(RPk(7oa zYQIedJ}z``GW`zX*0y)P;t~(ohifuEYk9sNGo@UZPjGy$YTC7V&(bU}=O^`SibK$P zcit&GM^qv{R8LBp<4Q~U5$~T&ukNT`O}%_(aCOVewe-R*Qnt5S!|c94LULyqwG(4& z^8RuTuR?fs@2kH_@~U317p73ro0xIjQ-)E5WOK$d_6q!Q3tf2PCYNe07Pq!UyF%2o zp5M2m%r~c3uQwV=bm5KR{2|ieXwl)I>lxL!oKOk9#bSRze~3DwmAhczN7+)i;W`=Z z>e|F>T?0$8(&i&`tuu9{Z$0O;XDiaR>HqHdiR+&A-b~4I`gGeXs?IQTh0JYBvwxCT zds?BEt$db4_swrxn*J;Yw=4N5p~}GET4}MJMADHTx4WR1PbW7;FVN7tKRhIRE?m6I z8hBpuJi4y?xSv%iDG}0GsQeyn+N#%>x0QB9N$&Z0#Yf)Q?Klh@UjzK9c4KEHu@_dG z*pTRfWqU@CxA<{U+V}G4kNt~Bsw!1o#8+?P`O2MxcCxjO2f&?r&8QS|=X07dNIIK9 zY2bFB{vqJ)@(7C8JuBRwfmE8Utusy*=Sj2cNAo;$hBJ$Z2^vS$x)XUBJ}DLEEH>ZV z?C?9WCJq}VM2_LVSsijlRk`b2C!yT^t>@^SkZ6X9%M7n05;L*RPm2 z`T>2b`RszP`P;mudSH?Eq#KPc*k`q}6X4zN^0U_BqlEqTT=%s_Np601$G0=?bb(f6 zcc;zEL$tHc_GY8gV*9n}AB-Y~>Pt;qubsw5Sjl`G7>Q_Q96jQ3F-@MzhOd_F!1>L* z<{JAG;<4k(&l^vaW$$e?9eETDQHjMQjY*R%QgPh2rSP7w-p%)rTr!(A6NPr}p}9$A zBY@n!w^&Ncb5@mwGggg^bN{Z&5e7edc~Hupm)B+Hx_{Nt(TgX?KM}I36|$1s`$Umn zyU?<^zg78>^0L8>1&!6s^ZB=3^M=0Yc zg^3-r-$3?A@$8r?K+pvS$nfKw4#5O6Xw`Ij6HP7VP5e=Pn-pk-oW z2G9!`*o&K3n3+4nu>WUL$=Sq474T0Hz50K{0M`Elivw5~{*w&%{|XVn$jrv_-+qW% z{4?{<{(sW;zvBJh3nXn~Yvydu2w-LX@8tfs%miR%``thUGNI`88qCM~*tbla)g&gUV^ 
zs^-fG_C7ORs;ua?#vBu0EY)8;Fa;P#1_3M~nhcw03_sK!=vQgJK7+XY9Ei|J;ZJlB zSoNGRGDMuxcbc>e5w<|7z9tltNkB*#y?*cuXfYuWSRoB<5ltnOco3mva^eokov=wC zV$kF7{Ca;*6FU%WlClVLFl5GSd}EV7FuBc25fEQPAYlpUUjT6+5XjeksGb`2U*n)L zAfI=_^nI*|bA9m=#xUSFJLi24;u62{qQHArNP&e}pk*F~ib(t>-HX ztqD_3zo|Kjx-&sY%s=+lf!xMjLE`oyp6S`R0Zj%ZB*Px=u>ByI2L8lw|70K!ll4M< z6(N-9_o!dKpCxK?NRoe@q@Jrb{<4#YA6CM4Mgjx*;_=4&09j zB(z2f^czzCIW=);Zyfan7);NR;y0GiT<#gbQPMw|TLgDP%9j2fG+70x7|NUoBJMq6 zd<;mj2a#3-YgJAh_!BfS?E{)}!YAy|AhQ?n)(fcCLoDGbN#&9L;C}S7^2Hy^j_xZ! z6kd|kw{wo~5hgO(??vDiiSEtYVnCgqKLuhzX8A^qQ|7$U@agd=Qp{P@Gqn9J(CoG? z{6=*W^}Qxtd#N*aZ0z6v+8i}5UvO*PX4NQ4!az4C^@sm}L<0)1xA>PVHeK2IkQ7PB zpAQ?~&0{E3Lm6afIN8ODikznbaE9pt4HNAYc!>6Y{oNI_v|_T29N`mM6!dzHaAw$2 zyu9zFQ|^v1Yrq3=1@Jik(U7pK_0X1YP^mgNp75rfnfW^Ws7LnwO){h1f-Gc$3Ll*7 zT&ir}*MU+UNz{~=J~6X;PxK6nWG~Gy;uzo1<7IOb!Zg26q)IR>T8=02t}iL0{^K(3 zgl-;QE;lcxb#U$6*!oQ_yUftdk;JNshr^>~ybWEeCc#N}XFIP3WvhI!qq%v=`7}2+ za)zV@kL(-V`;T!EW(K|0&}s8JKfrx2EKt1*ILo!eOFfG`55aGsx~kbg<-nw?cP7;E z<;f~eZ`&rov=M&e!g8BKn`i5*q@t{5ghtVdv*>VfT}Y@#CFpcZe>o15}1Sy zn(NJw6Q5sie=|etK5=P%5$7IJfrHdal}W!D|Dp zODg<^ntNFK)>H=?-wHn(ADeeDY5e4u0=h7{{3`4_-^BBP{lhoCh;s~{J%0)h#AM1B z-YB9aE*ec#ToHM>5@Jn-;QT$qW*AUbz%2ml#$t1GB~%8^>v7}t~9H|N9ULaw?tx@YOQ76@{`%CQq%KiNN)>pDD4 ziSM*xJadlwD#89acbX*wY|ckICDXfJIXGr&|FP3MIyd(hX!f<}=bN=&VIyFK_|gp9 zl7IG3haKTRub6^CHm z3?$RS%cGjI?HEffYpq47#eQ?y;LF2JL!}v8YqfdlWM}F2J@e@DVKveHaDgkPQGicS z>XsF6wF-(Xb*v+H%e2rlp3+{q$@o!G>pW4)&rrhwz$TYjJ=8~pwMfF~ASJ6Jjj17Y z6D~-Xbs9JrbuGcaNXa`d*yz6E;;VKBsWN2IXgTb>S#@!VzGCq)IT=b<^WO%D9?R2` z_%(+fg^KLFb?Jz+d*NCV*pkDx=5u;j*BBT$t_M>dOGZid1WmWZUe>VsZV)6cBdV!tjouOo_v953pQ{g)B zOdw{caU0rSB};MK^QtyIS*v)*I7ht@9Y@4602S;hRIC<}7DDbc`$_ydmKThax27?; z>;6`E@kl#fwCe4UV4D7L<5g?w$?6UOo2XfVRD11zd8UI@L z4}#6{Q&oKlTnxr{(>A^>o5^+Oh{DNBD3zd%L~%}RT+5PC#5GWwOxp5FzZv(#qrFnn z=z5*j=ai6tVb1gL)rM(l>=K>BZ9bYKZOVFQ2bPf4syTkEY}c40tMu_AX!QuPR`0?W zeEVS^eJJdsXGKbT!EL%1!8uR+F>FjWZ=M|IJlje%!b@6Y%^P?o=gHbC%-gl-vv)YA zKEw!^d;-Sy22;-xa9tP)KazU-+7*k`T5v9__7y57%mKWl1c(zYT4mN!Zo|J*?s7hY 
z&=4v2{-KWRy&AX@*Ok9e)#o_-_)HkkCUr8hzw z1pqdWjKu1!k2erGGAV?YhwbpZ$tT*cOi|{D$Y$zjI|b=WibB=%?i6%1-)ubhpYJ+yxXJ@; zqy;uCuSXaW!2j&4`xuB7l#=R(-KKsvV4Yfl8u9Z?Pu{jRARE}ujVEE{~hH6vL77a^-`Yx=fz#@$AENSy%qO+M+=$yF7t3eB0~xKEFre6)6Z!5ET=_|zKL9^ zs)1Y)9k7b|%JpbRifCbbC*UHso>7O3XbrqM3&pVujC41rKD{iF1KZkT4dE-%WR5x% zXSDY|oL#oJEtOifag;YVe$NH3K*G^Iw@+7Do7`t2qiNP?T)4^$y@roOXeGonbzCPo+`jZwi%oePnqBTfaW_8Yylzf37TVwkW zoEK8zAiqCf4B|nKe0L_TbB$+bIUDBGTW^CDX+6}xqPz8SEG#(OCrL<0L_IbQ9y`Sp zXbdw878KxuC4imklwVoE%OGH=SRcsT0IQ9E5ZG*&9e2ogpLhV=T5gX;i~bb_$XRze zxt`K|8xnZl7T_d6_2gm`K`l}`AO8=1txQwEW};)_m-otH8ATxh6T z9?m*n2R&wWUZDzivMEoi@O2;0rsWIg$EDFsb5@`bVBhsAkUbR&=pyboaECZwo?)=O zjQRYvvn@eS(=?Xr=3YQS`t1}%N0DLmF#TH%fdIhp`%k2|Hg|aK(L?Z}Ey3{`4Tgp} z^k7zNDD2^;AAgz4o=L_d`Mu-)c@q&`lfsSqa~q`EID~tBDnUhwtIBtWDQiE`PLPco z#Ef!|k@pZ&_t%TYhCt=#r)-tTT z+iIZ~xSVizuSgQ--#wpy7faCNHtD%^Spec_+L4Gaku~uOreKHiT<5aGJ|?(Hr?dhM zxMNh;J@ApoZdaprkq}fOW}877^GUOF#pS8ida7G?=2_CHT}e>ze=0 z9~_^#AGUI5PM4Fo#>|W9?;GnH9!mZdJvi#LiN&5dPIZp`qHsbD6{nT#!IrroJ$dbY`YcgVdZ>_ z{p|T1wrY2W7j`A;Fk-j38K}J%mdJhiK@y$`RwSC@J*~TUs@C+yNM>1Bwsa7A90pj? 
zTHa!g{ehe%50@Y>mvSw(M~l(xyqe1cDEk-5-{wmy6()A2kcYPNYX+9`+~S(9Vd2&B zvoY4%*zE6-`Ynqo!#9WszYRYa`gM?rvCp%N>rB zdU=DJ?m|X}K4k=QdHl=@qQ%nTXf#=GX{QB_MR|b&y!)2I=-iF4j(~VAD~z^Sy%#pw zy~LUI&l+R_R=1MkeWC$3w%HMVdkMF{r%s5W`zYrv#eM9|Sl#kyEc8xb#p%y^257p9Ve@>)$J`!AiPa$dtj(1w zoNSAhIxM3M3qsx#rFNxlkNaQeEGsilKEupjeMP2-vnbPLP^YCUqIc=Th`}P?1>2{} z|A(@BjE?Q;_XHl>$%$>-ww;{VJh814+qP}nwr$(?=N7nzM+kC;&Rke+ENQl`&51HSAb zz)V{!@cre$${s8tbI;Cb>X+5iu0|ubKJ^Z(!h4eQ>mPDb;=KoXU7HrA-3y+;6|?Wx zHw;bhPeCaT*7rBI&CtcQ>KAv%Vw69Xa*Bne9Fu8SIQYMAquC-*q%FA2i_n~_9$vGI zTXs#H6Lp4~d09uFrgFpH@+9kn4%D|FT_Im1`@@pgIK%Hm;JA4yD>Yb=QyjPF-5Do2 zWxz!paDC(rW)BWeC|5x_$DSznzZeU-|&-9K0U|B|}@!_EGmr0!=}@M)NT42*$= zgB72Fk(v2_v$`K&`wyvOU}X5gxqnj~Gvkl7{TJ1-{`2O4P~Febzp0MtKe~UY?&s*= zRQGc&*8fg*9RC`*|3-EHbo}Q7|0mV4{^PTXKZEu!x?{v={fB1%-DdhnKL1-ApOuaE z=ZycS)n&AS%43f;(MEtkf+X+2T)|xZyjWY>+ri0NKp>GIuHXdx+uM=3`Ty$K)J=1Y zuD*A@op)E4r0t#SbS^blsLBC?hQ?9)*QEMGiud(yCHkhuhJxpin44W(K-rtf+tn~N zfzvb9)zA~k%a^FPuK9hnhV$nF+B?_U*Brmnke$IWI)he+V6^)UE>dp-+zsgfpvnR; zQNcqa!!x1%4Lqd_^nv7LzXv#|l%|OkXae|Mqb-I26j;Vq|h_c5+~8Y!Jx^?8er%2{6SKP`E-%TdOze5@7DBD8gr+z|sQxMJf}z2jc+9 z-WfdgQ+p>G(Oe0Wo{QAl{f3NbUN?Kf$Eyw?;BqoUKnDO;^lw~kr~msl^@kE56BRJ< zibRo*t0+7@gEOFmbIrHV^R@feFV`MjUvLKyalZ?JsU7yTPy1WdAY$wc8vbsek%57+ z39y_dM<=HPK%mK9&77OT8QvC}#(l>?+ z_#%~-9oxIuVc2>)XNvl&CV<)s2f)zO)Q}E@^bVY{zA5+% zJ#a;j3O69WT#Xfeey2Z`8c)yc^(e?qv8i}-&cxn(QcITkDvas zfBM({>HorFsQ>meQNh1isIhHEAI&pq^RT`%lBGfCwqpe z-k&WpQf5<2pqnJ6*9nOE3-2g(wO1pOCL?n5JH)RNbDhJ_M#Ruo=Cc@}2aRlI1(bWZ zQp?ht?n|{r)=+2>JE-P1oV!84!;C*Zg)!Dptkh z!?o;RvR|%~3o95KDBYoUe~JFQO`jRQI<53Rbsu`;fWp^&Lm?xx6BT{C;e4o!0K#qI zb#wISd_cn9)1e9YkPUvE1GKz+$EqFaasYnv)a|5#ek>Q>;yq>T)O;H2x$3$ui$L_~adhj2g zDNN2MIgwN2=#^6G9!&Ka)%=9V;|U9SHnb>Q>T z2V=5-!*-}AxU>7cJ=WS2>-bTL-EsZr0;cX;)5(AGyYc0D4vYz$ErH_Y_TioRrSs8Z za%@+wlEL5b;LrKfvHcDZ9s4J5?;CWhuhv%@+uT3MT)4UeH|{Wf&>QB`R=6Z>?PPp4b2{}bQJ1{C+C z0RGSFn!dRXe+rutC#yg2+Ku#G;Qd?dWZxsXNOG)SYb&y8GDpz&)4%0f3pZATdB<|3 
z&ElP75qgkbA>uBQ01XMY6}jP+`lJP9x`Blqu;NkTvL-D?dYIH?zBUs!a{%kSYI{D*$l;XOjcezK*FkjS_vzK@wf zKSlh0PU6$`oYlKSDw8qOAGY7shL$#w*LOlgerlIm_cBH>80e}|FQA92jl>aacfqq0;pbynn}eaUou+mX&xpNZ z@o?LpAdhacXf>`Bze&NmWbYVc+++b0f%?rD{28(iIkc-wz;yg1>2|s1Lo5DFP*&Df z+9KeX_XNANDs;T1MW*}m!l(rH$mI1ko;hCGoLIzOO;rDc`n^K;k8&N$sdS^l@@C7R zx3sorsk=@q@yu_4WFLJDKvp%(?mPJRJ0`G8=)zN1s`xAr@U#>_n$y5shWymtxtdSV z;))2|g?rK0oUCGdhSjhZdM>{a`i|1Dl|X3~SykC`Sd`{!FSjQ`9^F*}sh=p_?<3%A zPRu5zXnm*+m8|u4hbO5g9!P8iWB_>wt;Jqgn1zA+EOf#AyH6OL73#sxqCMOhdm>5I zkC`ARRhlW|d%~GD2iYWyR8%8zqmfu}EsC6zmj8MI!-?eS z1irtFBEEmDJ1~n|7?DfLjGpKhI5RqFy)CVb&^I{V!~Bs}_~cOkw-f4lMM*8&_F1sY z0uL(H%`7OGz;SLA#>0CZh?Ez^%29{SQVC>|I?U4hi_f&>gyeOLc#42R0pqBNRdChSB z-Wsq_%laali8OD8W0WrSV1qVJ8sbMS;tpk8jIr;g(-Z|mObZ92Ofumd)ak^imwp5G z8IW@)G9ukni%4}D)StaCYiIlTY_catOp?_I-P~J$?WW($Qo?(HQZJh zAvOTrx#e@j1Z?d+@lEzQ4ujZ-B3TYg@Onz?PU|ww?wF>Q%aAad2^`tjdO$RJkncSr zuQSvP?pTGJd>de*B%<8c?gX~=s~l7e@}flX=Pj&_BQ6J-YAmdv!VWE*GKhn>nHY>O zgH6v^g1PT9QVsGopJVDZbVa0XR&+){yO?!aT+7XYZ+rA%k#Mxc@6Glwai*Sz0W6{- zQ9h1ZN#`RI>^kF8op2ilk0R;@)T_c%1{UIf_O0LSoTv>+6tL@^wSX{fKH!L1Ae)yw z_mVZ#9&3)QRqx@-F~Y$`X`E1gE+pgPXl@mi7d^f2|F*eMQVZ{{cs1W3tk%DsC*~aZTPlc6~Y-YVyqYx!22)k2Gb9Eh+0N*QZZ!4h*WY7%x?#qXv!}zAp+8<+ z@~NW-^wD~Yf&m)cslfhhLcwOY^1Weptf$=uV^JG*A` zHCTjoE4u6}QXVbLXwpz)Rvgk$EeJr|6hVo2@9#4%^?*E!GjZMma+bR`hAr31ONhjh zhC7N5Ka3*hmPCB9#z-75RpfX&m~1E+RSD;Gs_4hX%tlwC=X*E`Tw^qVDp*w8OtR>s zH`7>HL)D9V5aaWevgJ3|R-iWl)>^ITUdPFtBIm29BJEBRj5(=atxJS%U(`IF$qJ9h*aexb+p!y44WwPje4h0;vnL_Xf{=dI$#=JX6qeh_BnHy08J$ zd@+kMP@y!G@sEjGN)lcaJ@4~QA(!|YCPnspuwVQ|9xQYVA;AN5%mS)%y@9Fjmu^od zL&y2*mZylCYLtyjvivuiDlvf)!#k(Xd zxD%H{zO2CRx*71rK+EOXcyp|&Hx(*&Fb2Ao2hlHc0xrfkKc;ybhcwoRacGyx=HmqO z$9QWP<&e8a=1%tm;yb?RhIHEAT0SGrB9j=mfk5HF_bM-X1)IaUXd6=}<$!~Bud6YJ zy8e|E{g)+4B3tDiZv87$0*4}sLvmW47r)P>F-2F1TakM$-3IZO`L4|Vayg=* zj-DZHQOrk#!m{Q#yqK(8zJ%Y8*@yJ)?gV(aA16yxDTLlW-enW(lgv%Zh=7tVMXUe6y$y?x8@ZoLT(gAV5tb4TlCWzLXA{g5Lv0BE^Atoj+Wm6+Bl_!jFTd&-C-mLX1QD84Wlv~ zEoe>1rI=4f}i$|Ob%oM?RWgT)P 
zHXe;jJ*`)4R8sa4HzQ9;d#?737&*CaT;~`TXb$TIp@rg9 z=sji#HWM5w6ZcxD{ruw;aiF$MFhM^`juorHQW>Dv44Hg5o0HMjIG9~GVynZkYnM!p z8~Y9n690PnVTb>HVoZ6@*!&iaYu%`lU~x9&1_Zn&tW4m0Rw&_O5GHA8V!lr;S&!C95vP}sc{P6-wcqr`mXaBjz-&U@cPBuYR1}od~O8ogB@lIAd@&E`Rbr@ zGbZ&Y;vMR)8ExrvuYsvmwmW7NLkTRZ2^bcN0a_^6c5dm-7G%6WHW8myO>?JpU2W%0*qmpop zq=%EfxK}=PBeWDVl#^IibQow#OH|GhmUbE{VDEk-F79AS=H2XGHfdzE^}V-%*2)$| z(A7X)vb@4wQyO0Kw7T^vcrQS{S+Y6z5ZKIYIa=#_;VdCLmXn29d{7McO6_2ub6AS9 zm2Bmr+bebY_sgIH57cYjzH6Y-YDmqp4b4`GNR`gyku-wTkm;jSto9(x6(c7Iq8a*o z+hMmsZg!q2jeRHiT{k{IF*E%#;CiLKjYfHQlz75ARP2MpjUb_na9Iw=cvR}#<57r;X|36PJo)*NC`VUx zTw#9}m+{Mc?Ru1Yt}1me7kC1jz|j1eD1qri=~}K6<5283CQQ-^I_*+&p}a)*VbU}~ zGgQ_tRtd~9V(?@VjAM#qkA_2%kJt-F^Q1!6b-3bv~|Wq1#GhMsE(v74?{jVEBV*H1(YzWei^)*imZ&6l98#c zu~;;8eD-iqPHS8m9inRzUcL05J6 z?-xYV_@~XJ-`_^CJsPr)HJ;rs`eAP4VJj@hEu!|mCJlFw z^F<}c3mdE7BvbL*0yGd3!}=Qxg?5W=bz`qbQl#i@y4>0!tl0V8#(;|zGkB#6@eV9w zBugAIxQ#l_77bCUE$Dj4^al0!lLp}$x=MXM6}aAHni^6b_>*d^nU#Dz7iGo>p}t`Wx1HH3F^%y_{HmB=uXAq%B;WdYrxl&J8I-adtKiv zh10gVg(arurGW_N=9Q6jwY*=F-_bhu@W#luN{R@qg|>Z8ZG2hn`QT>$2yD`MyT-C8 zkXbUmsZz$j8kiud-PCO8OgUwMzyK#H97V5Klw ziwPwqDj)2u4xoe&aqxpi%%o5c2` z6ub;xa3jpF*jJI+MjL>*v$Nc1>UQwMHmU!&Rg3s;=sEZ)*X+ppR2aM@H zSVo;+I7L}^F;|eK6P#W{bjy8=!oki(l@Co7moY<@tttV7Jf05ypS%uI`&3QksAiZ( zG(MA|K4VOLjiliu(cauTQgy;e7#W_)Ug8BNkvj=@^JGYtIX9V2!D9GBr?o$3PzwX zMka4Y2nAz{NvADO1l zd0l#&`97xd}{Dr;KeXH=)P+l-J-~h(+et$K91KkoPpL zY(Uesl?0A8Jjr79o>tJW9guCRwI%gOD->*My}I+^E3CxrplFB^09Z6twp|_ z0SE0JUDJiV%q-Pf8Gfl|@FG6WeFO8A3Xj1_?0&d?el`tzKf0|}l0A1J$Tk<+2c`YNa@o65zabS@!;L0!I=hzu3 z=uK%k!7ejQI^*O=$DndvT#UZgma#Y-*ZcvVD*mg|+E$j(NG!4D_}y`h?(C6%cqD(0 zK)ES66s$=mI#@Y4(yPoRk7Y!6rnu2P_}EOpG=ACM*lkSc;Kq_iKKO7J0auOC5Z58v z3z7kdBs#+vS9O(n8{03@^tTQ}YshIu-U`~#<$<{N`ltT+v2yTR!0gSNq*fO{*tfvIbC4x0Am z*tmNUV=P;vPLeZnsYh*1E4za-bg`2g){53*AuK}5S(z>I$7r?O4bmg>!MW9N%)OS( zd9DIAb*~14Ly=70^hQ*Ug_>215sS^WW%Fn0Zm3j z+bI%BKL6W$uXCeUw2ParCFDAAzEEekZOCV){(17(^{7d)&eRk8O-4u%)2_AK6&j{g z`)bpN>zs$Wfs0d3ANDp9N2p*Z#)nK$k|GS4DKe}Ex;c9(R=rG17PFOS)9!u0#(=S* 
z#w|_AA{mbA2Hqs~!+D^!+ZoRuY1p7@{U00|sE=ub^!@tPI@tLN-0IY26>=4YX)>Bq z-l!!FxW+tg4vF*hSC)>|6dWD5dkKQl4~|2ktEIGYs^ZM}y9ky{^$&J!=lbQAt0~K{ zeZNRm5pL%OBPkK>O#@_e5at(F?F1IuVoz;L;-x}iMJqbOuNNb<8dzUX8I*#1n%eCF zk|MsKVvlXgbX!}R({-QPjq~+ni&D=ZT3QIT5+Ff#_HT69qwI_s0f=Pzj*iSmIqma) z#o8iUMGsLYoT6TjPr^0AF-$5j=Jp25pE`f8J%K{iFKNzl5SuFs>w?2h%s80Zy@9i-St2S0Nl+*&P;s``tY07Gq_rPlE%OND zQ-^PN>_u<%8Rj;*u%KRO?=vmL^!)mGzK?la$Ux@^ABtegXwqJh@W9@)1o%}lYaZG8 z6S=2b`0kcjNcO>KzeL3tukj6e;;I#4gc|4Z+5cRK2ESoLTtIZPMPwm%UiydYbnfQn zJy$|8{6LC`sE?XAqSa77~8_5=4v8p%m>*%0>@F^N%UpajkV1r-CGiYjW0{9XD69HWgG z?*>?$%h&bTB%Q%{-_7vA1S41{n$SY|;#lP?@h?Bn#drY=b=Y0(lufnTJ{KJ$iY+xI z5ElRtTI~vg6B{nUBXT2~&-r?_m>h+vUoex_{K*$0kG;jfkW{u^QHnQ3)f2A3<5ZBC z;hpW2G>eVj;9-0Pm`IbIUYiNX&YTZ56I!Q$v1SwV9v8N%%);mZvUid$aBC- zTaW7Pl~PNT#4KhWDWX=oM4Hve0Xn9C@QTT=Pj7&;3c(X0kYmJ%S9H=+;*T{R&l&ux z2wCb*9i&QWjnw?r<004m$C&5co0{A(7sjx;F7s!W@u&)jZrzp1b4?8W(xTya3P?!d zn3?gapI*y6l<;{ZHH!)37t7&V>Kl}V#9xTr&K@@vn)LYvc18#6a?+%n!eZKJ3agYyT45DRbl(kpOl=QPO@8Pm~b$= zjL9wa6Je~yk5@*~jkWq#EyaIKB*<59Gxt^6rtl!YFd}_##)2u`l}koDX^3^^zV}0` zLWGvz)8HDOLVFQ3@Jr);zz|t}yAi7VDf$DrrG2hc;=icd3}m~~CW(cCH`B`L60y}L z-=1sZSy#L*=G89{nHAu%tx^2Ap(pJgv#2hlps<_CPFwtVFk?RW7>IT~SK|BVPw>kZ z!dY=Z5eWXRiqJ;CUZXWK&)ukVrz;mju0IgVr@BP!rlhm*E|8o4uFRf7PEnVo8+n@`poxcKRE#d{7D&Ash>Kh*yzYrm(6+0Uzu2y+dpLx(jBS5jmv&$!4q>U z6?bG4R(8kP^B*-L#tg~FJd}u&a1IUg4qEGr(I}u@(}6xe<;?zV=Drn_|MhZ$R@vVN z@JgK5sCQwZI`?U&bo9{Woe|{j2cLN{4D6n(uiPmLY-*=s?H_SCUz!5O>5no^pmdBi zF+f-yxNC3x`upBjx&7JC3oqZj_F(~>!zMp2y0LL#5A4J@lS*a`70c?2FSL+H8XBJ7 zrrVR43Qoa3PTpwF6QUr}=vE9vxd#i_Es?rA6^NJ3w`rw70l8RV`4Q2(t49=u7p5=S z@}XQYQ~G2>di4Q@B1(*&W%LPIy$oNG|0;)O9|m>U)Pkntyh98*_maI!LX&|1IYq`s zoKNXBHe(p&Rfi9hn{0SBw!LW=<^Y=EY&kItk$ni2H3K?X=64c=g^?yI-kO>4 zv5O|b5*aojw3hBFB##y?@Z8q`zXo~?@mGZ3P*SrJvsbfuYV8KunlchrJ=95RyMo=5 zYv8z-)RfJ`(bI*M3b;*I)>1w(+1RGl>qT5^h=EAHf50!=P1_`3C@H8BK*be2DD5#X~D+Yp2s1}K9Yx>9u$ngq!KaK z%$J#^29E~!=zk6122JRe(uzE}a2*M?ytk6G4SKeorpaesPY)=!TKuk>dsfFM zhNWX4u 
z%dZu_cOu}3cR3m!1bjNW))8%a?2Oa-c{E@W_KKHw&gD^4#UFT{c^iq({UHAKP}TmN z)_yd@TGrtQ=eS4Mx_%vUIg5Hkt;O4Xr+f zZ1bQ2wotjpU`D&Pr`%MIN2y5MyEJd6XxSuigi5etON`iNsg5#=hZH``E-7CCwcolL zkv@T|CZ*%M0i@w^n!>k;K@4r#6>xnsYS95SKl9h#%t5|Da&xUc#Yro;#{oX4XQF$F4DU4aC2QW=(kagv_mqC`%0y&AG~HSHj*yp}i#ye=oUl_xs7kP%K3zi68!KLNR*yrMFSDc`fHjuyhDCAdkDN_C^95 z;RMx`iyZ(!r4a*M8-(vH=M=37f}1ldqZZY`pM*hgggVV%H#993tq;I0{RQN2y2es( zdq;kgvaj-wg10}wQmKvub{XL&w+}!tUZe^`<12+@jyD&;UlUQuEQl5K#|;E*7jeD- zpR=iCR3Xli7el6CGvqTSIYOhv<&i?Wwaulg(0V@6qsC5uTctqTpY^TlCTu{$=0}{wkclJTq*nD4bw4NJX1=51r^C}{AN;b z5UU745?pF87Z;Mr@l;IVJ!x;V43GxZMrz%vnNXrY@^$G#r)c+$y7rxNHagWT3F7$r zi)7IA6CFmj>sW|xJ~Fph$i?3WsdwxBwVq)3km{&2xkAC)cZJjt=BXeqd&#NAeEddE zSwtGXS!FeywQXr-Mz4czd2D6P*BS%|*!5@a`2a~38|!6I9xRz{AG8q#J{KI}YpQ-3 zm5Jn3BqJ}Wv*u7U&L-htKzi*M69Dfh+o7Dqb}B$Q1y#JWOPs!%97=Ar_U**ydHGDF z0?q=a!$`5lyp?wnexrb;1Yg?&id~K}CxL{u9eS|1NaPO zJ@uN#a%#U}$y_#~X&fk|F56bH{Q(OLpP_M*r$zH-W=Y1z*geYdUZc{(?i%M}w&vhA zl;BD^L^Apn)^>)M$4L_icT0&UGC=%Oy-~oJfd0i&6On6}0h}p-ZqpCQM!rNKQ9L z{O*3zp=Y&BYYo^{w$oAdW)MeAqS_6J;hgZUa>b`ae@avz-w}+zC z%}{Y2x;a6s!{@HVGs0_ubi+%R0*SA?A{qkO?<5(de-?I3Xv?GS_cg=kxlC`v7E6iT zb}-(svJDRK-JIdt=&Q)4!aU$2rwx+&>;shGUd3ch@4dE}$>g+^+ph9GBFG*Bz_q*6 zt=^GUD!B#cqm}QwS=U1fTQ1SLAZ{s-Q1fEc00(&Ptmq8S)*xt*x3G5!3^nw%@$jAU zo}vTF2*Lk&d2j)B)A8eib0^h_yBIC{rYB`=a3wG;kW_mvS4*K#!$PV{QH>-|AYMg0 zlrnN>4q{~nV3a(?zJLt!}fR&6AME0 zSSY{??!U3j&9Vg&-k2p{4V@*FdmZbJ<|GYiyA!xBj>4*p89w$fLVTMN1zMyqU^bQF z`0$(dH!4QPW>pE!7|?Y0cpp{}H^o}&KJW-y_9l_hG=G~5xYB!I{Sbb7iM!LU?^zW+VmMi1{G4}&ndL*(D}`RZeB!%MTf5=~lmJs| zST@M~Zhn_@cb_u}eqF#4Dgs(0zZ7}QAevcqz9-wEr=o#8uY}(o#P|JER)7l_=(bp0 z^C3&LrlZ1LT|27}hbQfLq?%6yybd;QBZF%!`TMW%&8@nHKdTP8e=jF@` zuqX#6;Rmz|YcoQdt|q$_Ez;G~1Hz=8_Q%EY)8;19J`S$N!ISp>+0}LV!TTCGkUrY( z-nd8ax8we_t^Y?*Ad$S-)|kBr6JZ6R01j;Le1&5?mmFV!j#9?oU2HM`JQVbY!KZVJ zgYeXWjlY_A&X}Sq$B%OloR)sZIIg0_+TbJ!?1-U zZLJZpo~An=W^?6dMuiJvc`^Gm%^YbTy9acK6n(WfL4E(-~ZqA;VV7Pky>bR>{NK!yEf5N3j2`J;38k&E>3`8=?=^PEmpS2X|d^b)=r|n5DV+ z0g0W@*wLZL3$F<4@)oy9TqN;qie&wV<2}C5X(zbLo5I24kwCZAU30J{{B1ov3E09> 
z0$ySqp_~e{+9f`kDhS`TOaRx%Cn0PGAr8!Nug)w5D#$@@y&M%r^h0$eo$Dxxt(89!Eu@TqCTcE#C zP2$KajK-44qYh_={>u(A4=+aT1QTDmwS=dDy@xt^w+lFT%H(O;4c(fBB*rqEfq1cN z1zvIxl&c%)Xt{e=S3YF*s9S~3V-ub}3*{CLP4BfWC^XPzjDRc@)H7*9zcExk2C=!u zTFkbH?fv%4%g@1%?0WNq{`9jY;tFLtfP3#BBe_HcIEty!3HtWdi^E9He_$Fj(JIfe zGTnO2`|4b<4STr+LzpX-!9v8N@p!uqTxo8*G&l zA57YJXzfW&9JX}r8gGPyFxVpef&2M>za>HOLS{$4cPV=4@LtPGVyf>b*li9!Q6&}G(qUm$7Oo#XJY!~Ku z4zjap$46Q(DL!JL;_3-8S%voO}~W$kjr$1o{XOamP#4~T^xz+S5Tpf0qRM@=$oVX$Y-5j09m>k)W33Li;l%sG?hz|Q^?h;r$3ZHK zVQeu%!==g_^>E{?Tk3nwIc*Uk1j=&xo4Mpx1u(kwJ^hb^Y-IwT*(d=k4Jf%~=+C{& zJ7WKB6-N^tn`!yb_s*`kt3ap6d*89JoJYP&`|dPO(yX(tH$N9n&ttcSi9)l`Mz$a~6k< z%RgN;@)hmwZ+ksP@zv$sJ0`>3nfYyv|a+Q$| zA!99eALKThS&VK{_YU&AoW{jBH2hkl`K>pQ*i*Cc9zJs1#FgE*I=9wHMa5xWICU{k z@aV8fuCoufyUfK#vg?BFDq+HuJdv&0kKBy)hQ8ygRuEYf`b4zMNTxD1AL=80%!L96 zpoAyx^O2gq+iG%C7^&M_`1{oBR&oNx&As*AA2p95R;r>QhxFBkg-liJW zBRj}#9wC1?08DqyUcEioU{gJ2u29jx6STo4=h|qv`}IgHD$#HXM(0tSQuOl0;F1Io z=l$7Bve%H^u!^*TlsXlh;ba74J?Y!D8ut4)}E46epgLQ6tZTeUlo z`iKNvdVymwrgV`u}4+d)yO zxPJ-oNeH8U@vc`gXOS&fl4H@bTzOGZo8?AgG@oV{C<}x-3O3R6gW1v&n@Wl7ye5bs z_#+;|*r8HYk1cM@o6Ga&X<~?L6mq+6`Dv~;dU${L+g3f{oYk*E!*S1B1*lJS&KYtc zE%>Y)+k&&t)by}$L>cWEh=8vFN<}HsuDBaYBLfr^NBptkP>gr??adFO<>)LHHu5l~ zP%QUG;;W)Mml*fqs2E2W-sF1kc7>_jM!ILtS;^U!&V_+F7Jtr0l8u9~EfpMh_`a+m zDAqVr_olou$S9Rymnqp{hnsj9w0hY+Z4H$y<1Fa+Q4JFmMI$}>w`gzb{= z731LI`N>=}f)~bR+YwjjzMwOBjj6 z)zVAbGa-Y3cCj6zYCJ*{W>fuMx^TtiX z{`%^?z1OI0lwA+sL)nz#$_@&s4N}e~t{w&9HL7i-g$Z)DUAXqD@@5mN+iLjCE${CPt*s_5}4^9?2 z?`Bs^8St>jkXJL3&lfuqyB>Ov2S*cOHiL0W-Q0hNcfW4G?d0_>#X3v}5Jf;uG`S zS$5c7xTGzg>%V!RYBywbRq9MsR-M$tgA%ACP~N!m*4ZR(O{WTUGESg`nAUV1XdkB$ z!_1^q@}_PMV#-LOf7DcpV=Hki(s>oQa$83*MiD0DyZED;@*HjYhZ!k+Mki| zQVUo>wsToHRT;wUg8q4NHCTG@{!$d0Y(%`^vlc77nv2rJ(&h<|BA0<@3R*T+4HceK zg;ua`jX$C~_7faW#Z3q1nJDlu$5#VL5L{Oa#=Ug;&_nz4ez)-AIPD9X;t#y1t4Ccg zo70>dFsi+Tu;r(vDcVB0FY94BWLzqgZSu!2C2q@9+{s&y!!VXq4=c74`aqWGL=c9M zIt}GG;NYk?=&)vs`A~Q~&j#6tQdFU~anXDEh=0q40clf8H>koqS=_EId&rf@>{D_V 
zYWUZ!-}t)AU6}yWY52gCtVE1d>_OG+V02GS)#hS&F7W1@O~nwP@N9l#NYYjnAfcwi z9y6{E@G;b>c7R6gI569=74SXlrBgxl2p9>2h^8*(wC!xys?aVUe9FCoS-n%O#?JI< z;J~nG}I(HCe)Rt(7XUwiF2HTMlO!_D>E$z~fB$EEg$lrTJt+H{hhnflP zw^&`go-U2^pTZ2#Zu5R&40JhfgmSV9&XHnW3&j5R3+~POCk&UQchrQXCeqQPfpjYk z-HI^Y?Tnf(*_q#>~6C z$k$1zYtQRsvsmeZHi2nu=Mg}LbBA(TQ!J~uV@9WGReOAAqFq&{t{zWNi5H8{=^J;B zs(-)v`?(yU?8V|a6$F3p@{V}N2LT7v#ffycOsjAS$#`dts47fifB9d790zdnG!-sZ zbX!SRlzSF)nYLT?)BFzrF+k40fQJxErw}UBag!%174o2XkwAl;d50W^asKExX0yhU zJCJ!L5KuLWTc?c_Uck}V=BFKIOpvm(B1LS~|CUn{YJBkJG=MggqIuY2m+)hpKxS>= zb?$StT6zH9=f?K4rMCo&ZzZiPw$<<8E9kv-aF9uczEEbCg+yURySr_Qe;9t0lm0x0 z2fS&0Nj^@!DEYv)-D#}j7|gJ5O(E#J<7#;HQ?cu1cw4+cAO!z0)L@+6i3Pe?{`1@u z9Oe5#Jf1t@`<+RjRxCCmk$nz(s36DSPKV8E5$TVi$S-TtbaEmRk$9iGEKoF~S*bzj z^#gAr$d!OLgvSA6Fy;*DaHCQu?_zgJMQU0C<5Bp48UqW>)+3;FOyHPr5*3H!eVm@yoj{Xh=zRcnI1^!gx{hem&NThb;kw)LM4XtDgu@y z&cx68yzpVL)spHdv>eBbhbFvB*ERg!Dtvt{E+l7o67Hc45tf)oul-1ryg}SL2 zKqdA0{8}0(Lk*GX`@2wBWj=ry^3Ld#Sy|guPw~7` z38Jw=Da~ds+4Pny@}jfjS0X8wgk)WCcrs?D0_TPK2Deia6Q{B4TG@P|JFlv>^sZGu ztePhpkfJ%d2E|PnruYEqXR>Z>Bu`JrbjWukjte$)4Sa}PNg{~DrFtUvL_X>*ZJ?vU zU?&_>9`>63bYdb@{;3v%OlB5CgtR{^Q!I$7hkY4DTYH!KA!|VrhB+5{D0+n8q;S2n zIw)1nSgE{2mq#|T`8$R!(-1*7CsH!Kj4)su*9q3$oa07BhUQ?4toOM~qyzScRrfGS z1gn3!(;N2TPd~zq&7_}S%NQnz z(I3K zSntVS`88{>u#~Zm^&sT!=LLpJq_}x$t4?9ajkO}uM_N)39%ZQw&-w>Ic69uwgL@>Z zo!FAr9d5`f&B2v40d))RyO3K616!oE#kAM2Jv?#7T#VWJ_Km;Iv@j!naJ2qEY@NfF zD2fs#%eHOXRi|v*wr$(CZQHhO+qT)a2R-Wd6?u5K8QboL8^bG*hE;;-t{zuK zDgNM{3dM-9o2pH#Jh1r0OK39j!hVAn_XWSu|g71ZxnT|&<#i# z2L%P7+L3-~Q!D87c_DnUhjJ}B+r@M7WY&TOwAoP;H}cz(+EbZ9IGf+HD9A&l;QFDW z0^M!vFjj3n@3=r3qX%37%RXZp(&35@_Rp1R22BXIk^U14$)X)%9k#9&{xkN=LqT9$ z$pRvSIL zu~|nMHGv8x%6@5 z9g_VpOh>kO@J5tS9$GgMu^$NwrYykGszaOnZQM&1MvI&_irNx3|0*4WmiabrBH9x& zjSGl~eE4Od$io0}nLng#$S_rc&2rX~lJPXG1r+vtbXscM!Ya@WsV&)(JTm-y{zvJ6 z^2L`QPUPI_?4LhrgNFhJzXFiMP}!6E1dW_D2rj6PTb%P;ZQ( z{2C1GraOvr=`j}R{e+hLJzff4ALDxiKfRM-hzG@GPFj$?OZLP342`Z;1}c; 
zclm9&l#;OIP4KpIGR8uD!$<%bjHe>-ls3Sssx}FkEjK8zA1Yw@c(n%6f>)aR+B+l5ul@mDLyHbwOzPUDvGa$t;QGH$GTXs>7 z0V^z-kZuRS^`W)qH)-h%VCvc>apb@5DqcC~BYfj59(lawXhxOnC<6}&HDzzRsU}Si z17NdRq4>!N8Fas{hfN=1MEvZ1`C=RNO_cJqn1F)P1jX~*9ZaGDDDHx7YUs62X~QZ` z2-@85n!%oVk)Q#vP7b6Ka3FsS`-v%nctfd1;-sAp*oSpxRfS78TgX?)i>0`V7VK4jEZC*Fb98V4a5t7zbF##mV^-c z>!d{R%!GM*VdJ-u@%o*QqEsGeM1v$L;*ybZAMkMBEVHN9c%@`#*riWHYT(dEe$7ANXbdLr%!(xiDFkyANb48R><>zpW z9oM2qIA^|RYLEgiWFamf59tg4w*_rs=xie^ysG}W>G;z z+bd#p#Z^G}OVZHPqt%IR7KEI08?da>Hl@my0wgPLYtDt}%MqSr2<^Z0{ZWj{4q(J+_-j@OV{qf&Cmz zs#9$CF!Y9M3!<>uT;R2PFo~hpGQL0_ntzK%-~)IP;(;P`TXqNT(4ATNT1IPH0n_{! z6!sy7RSxc4XvKSF|7Bg(n`Z6NTx;9Pqe*Yiq_p~;3o5Vve@N*3QECblv@GWJX(qa$ z%=U_YLr%oTGkeh{k$RD5HRlP=-SQ+E-`qwsmej)az-(r=IZ%H(I}Xq(<4}g9pjYr)l?@&FGA}Zd3a5c%J&WvIqi- zIRHMtBVJ6+1|6Kvu^LQFtC7?7TTqyC1FI@VyS>Hi^k+uhuLH8M8iF2I43{tSbLnIW z7Z5A-AB{mG9T+*%ZMw-nXv~UJ|3K0FfAgN0I74nomI9Zl{{-wy`bY!iJn{_{Mw{j- zjVsn!2@~qN)M#i8`X*Cdl;YrH)q^YVg8(^4Fw!t4;q3J&YRx*L{U`vG#4YB+^tya(B2ivr%2Fxxm-wpJIi(oRHy`n#dt4^-=@?V zUftV(okHQ(@AOKZ!Fey+uBz;52i{bKbb+&R3m?ur?es0~)kqJ03K)iuF$e@eEEGV4 zhW@bx^Q5`}-pHM)XvaUQ3=q%{teC}SZbI~*0NIUQ-k0X~i4TWB#}7Z2XolgQ)uH*) z>{uMkcFBim?tQyDK+Sv+#7H~mm4NXU5?pDK-;JQcS0({YgE((l+{n162zW_>t<~g@ zSFEUwiKoBp9U5jl5ox{?wGg~-+$;-_x%){2rYs;0Zf#F*SrYn*|J<-JK%5t~iwVVt zHQ}v}3~MBr1L)1_7)*x%|4Fncc>(#dK5Vo267}PStY#n(`k#rj3%YPBWwKiD+%EaIx-|dTfxFp?qwvMWM7#M>1-4 zG*@)MtQ*)BDSj5u||cAAmd*kjnf4(1SpX412-NO9Y!o(RIejku6+@%or#q|PbY z%EFj(>kBMvkQtx!hacqEiwxF}IgpP%U8`z_FK3I4!0>@~h_H~Gje$+M+)Qq@CoF8| zyllpCJa%rf!S?pT3TPYaEPxh$#q-UV_!T1oRZpDYE`A7K_8pPR+U)P`T-ru6<*n#{ zp*NfSmua1%W1MT=;GO=}P*OA09u}<+Jze&9_pirwSVEozTD-Gqt6zx>-tj4W&t8}a zBc5V`n&r62N8)fy`7Y$Vw(Xgeu<$z|EvEXpzSG!5bNhlp&)p^Svd@Nk_`#$Y%7?S% zQ-bb#M+Mn^a3FC%>Mj{!H-EGW$@IM9)1N=YiR0+)Z=a@iy=+LCYyx?Ph4YAw)S6XjNIVYCe@GFO(aaD$jNnjs=Gc!h*tfDIlan*5LPEYqzYt4wHqJLRV# zBz;h`Ha{W5Ih?#6ji;S9Gf_pb!h)!SRYH#ruhaOP_hB_xV{dV%Svz0I#x^1{8NoNj z4FKBmS2m0Iu`Sc_aIX3^$JDUF-TN;VmDD%E8e#WXUk~5>_m%nDbkHQ`%P5@a*a6G? 
z?2E>Z%)36i*_dRpF2qNx(T*EewSSQ`mpo#+cCFRRKLzp_q1t=E=FMJs1-=eyZyGs6 z4N(XW>vuWX^fJT?P>J)}t-d7fu70p2Xar*8%RSCu@XuVBH0|UeT|S8NbY8mFSL=T+tK9w}7uJN@SWxaHV-G9XZerzc zAoDw3{}bqyOpaoSzYN9RNG5KvCJ0t!g3Um1=zhM!CxEu3R?|h2?2MYYio`CyI5YF8 z5vsz4u|f)4C3#zmCi_p4VYX^}FgwH13+xE*Ny*;%_YNF^Fp!Z|O7~36UyDZ6Yt1rF zLULf}3(CqmjuV+?1S15Dx=v#?&0=)sdtXh9^{}XzrIAj_ZrN`3%=Mr0tBJAnlCT9! zCk8w17_?tx2y`M1RAJWWSjYLjO#`4~dF@i++r}B$OqiIy+QnnDS8=_iGRGuAxbBXIB~TQVL)+~yWD74p zWOrR~FyELyln$lU0<&&8;UmU>O5^8)Tn`hXUW5z0852+sd+DzBnAHA75HdV7J^zz= zV~G8Jo^zl^WHH((*@FYB3Fp$76?r{f1VgsvBZLaMaDZ4uVY;;8=-)0GG*8sir@dmITvJVZJxGG6@61w7c#7stw zeLInHe&tho7uxi34>kzjX~YoUX8Dp!gDI_ls96=a;9A@L-^NP(*0*?=nN)lBF*}o& z=qnWfa!luK$tQ!pBbCAOjx`VX?H%7oz8aH~0U;N$DQ>%oOY~>c7lPz}G6PT5XipV{ zHM%Au!ze6vJ=3pwlo(zNhck?n)7_ceys0*PTta9pZ86>UMUXBD#JfB%_$F5I;lRw{ zps2};O1%);rbAV6?|0*yEaOi9B}YB(a~QiLvJ(;OJDTxh?WTI|+>Lo;On;+*8bq$b z1x|(_$5SitWpKn@#+m)?T4ZV*?%;B1o!1;!xc|yw!^XJx9$IZEh!>A_J5Q#s`(XX@0(jA_AMwJO(VBie2_lfO&3>`d zG;7E|AUk;SMv1G(A*{_*rNs-;amey5qcGCGNARzQ?IC3P96R?_urOmJHjYk{D*29eA-VxR;b2u ziv4L21LJv3*6}$jwW$}~1iXMTd!ecwOS8G*Vy8Dohb5u6S8GaUPNBCF>=ce!=5PLH z^)wkhEY+`tvFCa`n73&6PDUF_+&g6udqZW^PwQ5M7bZ*ljp``16o9?*=2V}NfYZR( zd@WW>&!xJ8vG*YeqG}yLfv@67P23gr?yvH~4xc_Ml|oZvcvi?_RDJws+ih*2R`AAC zZHQDDq+EhThM>bEe2G??L{Ab#7@}frjef<#Kf=!6ahxT0-15=lbk%{Z-M>eVB|wt? 
zD{T9(r($TFo_{~2@*$gOk|?4e*X$2c2(Y*eT2`WOJ0e15aWKa*ck%-BmW$^Jik;)) z->iFPM+51_lR$ikY@lgpFhp~RpIIw>ba5{t?$giH>s?LL;R@mUfh3VI84aC0Q8yET z9I_IWkV7*ZgQ@NdviL44Dpa@z1TYh7UiS-X!Q4&A8prB5iab5qCS=_#tc?JYXiq0g z=i9Y{$>ClH%X_BMqoMx8#&l}Ws_bmE!M#ooM;nZqEVhpo#pdqz-i8{XY|LwBiKJaC zctJZv>+k5m>T2proj@Z`W9m_>=MN)e{%x%t0~E~3Jd7uWx5-{YHp$WXIDSE(dJ}on z8}={pSc4;*Op;>qES9Ug8Y>+4iU0Cp6}I$J`sk)gjXJ;@XqFs+K*twXs5fv#k~enP zC?gzkmXIV3OR?n7Eh*$mrc76SF7>-QmByN6Pb-4@AgmrF07Vvs8uF;t$Ov^dN30>A zfYOHe2TEhLhT?R|wljS$K5j;d{y#*@ckxk16t?x4#|)_`UY5%b!HJo)9Hbf4@7@upBPASr2S*b?@HxQ+NDSIY#V|P>{-8 zWh49yS}2>Dt`fv#OG5oXf` zAd6#P1R6k6>cLR!{&|>uFY>9Y#g(KHb-)5krbNcvVC%8oCg{A(q5g&#QHnXUdUVZA zJj#ge_O7aiQ95b0yFS&tyTbZ!)?|VWHe^Pqw)&HZUIF0hwIsZHvnd^aJ}7RS?8HsJ zB`bPDkvxwJsD)&(hgNry+OBLu=8T4AjyyH})WkltGaoQz>E(@p?uOgv=zT!!0gB#-5w2};*x~eeyR*yY{V-FH!Ky0hn;3$Bg~=f zhOO@z73WH|i4LAkY6}obz@==k za(G*+{}w1Nn_Mxri<^j#wZvBE#aS?u1ryZN^#~%0m~QTI8eIy>HgA1Wp$Cr&E6HZC z6SN7e)5LY%3KM$Ia9pfB*Lv%;IMl_`AaN`#mxcy%F<$6@^8!twZ%;Hp#k(?1`j!QL zgD(OL-QQ_s3j$CtCIGREVv;yzAiwAV+&`em>FN4;0NSxMU|3PH1GYd)xWBhIx{|Eg z{$fX4P~7NR2NVQpi!t!&GwBgo&$5e(sj z=kV_#jp-*Y#U#8=!HD+_y#i#=OEcS(SYzt zD;(G@L3V1m*HGebt7b0m>Hsj|@{{Cn(VQpAxw(9^$U?3)(k=W-_-+@(Q=`YRH7}vI z=Np6B(pK2d0x>3Ui|ccLs0UCJO9|$%nCISwOd2jn{VzkfZyfU|sPQeH(Z9W6L^Y$H zwB8PZ6E*&L+Jy^XQ~c%a6Ajf*^1zQi#@>hN(&9*woZ| zFVtOMT1!)Dv92>scNXNxzGiNZnOK-yoXx4)+sM~qOn<2xl^`?KV|C#%WH!a!S8roe zamAPTp{DnuJT_rR!B6Ru6w6HFt=_gVJDtj%XrP%Q765DMy1^_h% zT0KyULz(ojA6LX4bdRsiZO;%F$i(UKcv!Roy10Y<&~K?`N@(M0BGF zQK%xoog`23dQ*rgqY%Sf;UB^V#5uA4iqohqT=Bl>{?u{UsI2iF$#P8}BKze{^PN1c zwB}G3dpJZ3S8`?jm){<`=k(Wcm)>St==cE?)sJGhv|ncdvuS%^LGZ)rJ@rmCG$q$; zovoYFUc)PL$)AsbVy5)mW>7B6VV)RA`$S9fx+=O8Pz!hGm^-vr0x~~vlOKiXz|IrI z9{s?hx}8+X;tHTE4DMV2|Hi^Uk(MU!k>zqR_u@2rLZ{%P*TBWj1Nu8w52W1r6mxwX zZl;WNT&}J+-Vvub{}M}i#R{M}jhXxU&+NrSn3xbqeJvu|EEANZNsa3jtT868ps}7& z6{5s) zITU`CiII8rVH|8J1fbUt0H6e=h%|u5ajX%gjbv@<5}10$LLknw>f70M#o%RLy*xHM zV!^k2^|~1;w4dDfdHp)_8|_mTIm@V=nqsqH!niH?o& z#h({UvP5WC&J3Y 
z0C6?C&t(YL7aYmUdo(4sXo&fj1~L8W45S@{1b=kh++FEimwg8Wy3^EzkK_2uUh6Jm z(AV_`7}#Mk-ie~9)~TJ9AK~kO`}c5b*~=`JH?f}$r+u~&!bCi|hwF~zD+9j8BZpQ? z3V}l?P_xysbGHzeK(#;|rKGe~`E8$NvU5h#penjUzgt4QZZx%gizcFdNm_DIR)?fs zpu1uw%~?4-SJ9I&+c(jb1HK6_Z8W@?woCo&Uw8i7PoVj%T=hr9U0xO4X+BAUik%Ra_& zx30aVtYS4f_GkVzFAKA6;7qCAoH=SsB*i2adxz3Y-g&E_eX~bS`NJ!|cI|?8Xf&2% znMM{1JY0i>0=uGAEf;$$_x$Yg7mjgWU0muF?bCKKry0|V;VlfkRL5= z>alsL?!#7y90DZREW^VINb-!nJ$?efqeuKU?GivsKSknl@0`CZ*Hba#ZS#}JF8bJ5 z%G|gJcTBEo=J>2Yw98=FxgQaTkqB1?Po>u%h1D5*-b0$So)ico5{?}YWl52nBK2?# z%&=-fpO0_|+e0|6J#8)Qpo@zbVr-Mlga%o61F^EE=gzNJ2x@Y!!6}=O&&itZBn=<1 z$CNGvbcF#Tj%!#8yrLY25`7oVLHemoM9ZhTlTZx#(huNP)^b15W7yI=eJ-cA+j5_S zaqZ|RL)#7T7(vI#X@x%*(cWe~mq=9G9+1LkMPc;5r^t_H6I?hP+i6(J z&Elg#TZ0!5Q{>p#*BM+coiz-}OQ>*Z@WK(|dqV71a1vfVE|D35KH10z(2XRw!H~L@ z&m^$!h%tzd2saZdbpwG;v-md)o|nFc7#$7U2Dn&_?!xkm`5kt4Xve9AAUxh*G$0~; zZ_dKQJN>UU5RO?sAPUk%qP!MeiGu6# zqQ2bVUrCST&@b@VhEpu?M$2v@5zA0j6zz08(lT)6hm2lnxS5v9GJG+&O`0=q-Ytj7 zsRX(Qs4hjap{+Ik@t*mqUCp6Y(N?ucS%!Z=e%B6$9g(lr-XH&fGx$oSPT#Ps<*i6mOIF{C@>` z`#Ou>*hsO5c?H4(9o<)s2RO(}v0vDpVu|Wc%iVKq93kGMV_fbpLwff+SVzbXEywvb z-mpN_cS7BG?By$oG`X6ws0ug}h9{-fJILs1uf$JLU>_G(Q-jMc+G>X^h9(so%{xPY zNtPxE@GV{_=NoB_ijI`>4}@07JJT!3Nk$5OB7wM19AU+1&Mp>KZ#gZ(n! 
zd2HjP+(M#T16VmBxV3^X>~t=5#~7C%GitaR`8h5RD}2A`4uhK`Y*^vxU+bhmTS>!I zSAxrYKU5YdT-Xba!2W$GyBcDUhObP+Iwv|ca=@70&#Q2yU23JZyZ{h7BsbQgfd`_o z9YKzExs%#iRi6^)gbs|#GFhOV0Q2z2vAx;7!I?mzQG+sTDu3q~r{wz|5elQi1yEhy z-JEK<7C$*%C+swAE0mR{7m{_gw_|)%3Se`~H|JITg)EooP}(v>z4)6yN#e>;0j0GHJ<7YYuAevSRTLl7ZnWa@_wwpZj2H-`p)T_MJ-==Bj440iGOGb^*pEW+ z1Yv>yxp2dylcGZ-0>8(5t>nn{GOft4>;rFbo=C*f9PTT3qk-xh@R?ur z1_BKs*#7uR%kYOp3xw=EGcYZY$fu~ErCSQd3pf;HDyb8w|7Y_iCpkxE{6Xqo^$!AM z*HuggZa?_QXuK~7cW#$4aa8_$ZQ4)m#75?dM2epkB6X_A26&&{)#$?iUm_rV4`?%Q~F{GVge^aNF6oIG_Vlt@UDH zUb|>T2vo;;FOfXp{S?CcPGACjrtR) zRyFu?GO@}!o^71Q@)7BwOyLOhk+yzu14p53WNM~!$_2OzqwjCd^04f(UC{%CjWVFa z5U=qim5ITgBqw)Z?hp+s7?!krZZu4|o~pLQ!QY#CxY7NBtf2ZIXGmxs$p2Qw_4ur5 z%mE?DG#=K2uSyV$xqxFyoT;!7Ip?1Jw2gT;hKMc{$q8o#CUUe9`G9a3 z*Cj#*B7MOLu9tPpYWH?gmMoIL$%iL`7*>xejS+Hu$09;`CaK;Ehjdl-n$&M%D;Du= z?0#6rN{%OU#%W8}apf5o5#f`6G1FJi$tz|SNGs>&cM3D>>LFJ7Dn!gov4j_ngiNnA zy%;P4xTDtaH9p_6|&& z^d@upAmK5YVH`P@hJoP5IJ$%eBn ze}CkjMLa}Qows>wYq|)WHf(p9W&0Rz8uxU=!3UbV?*n;c5^ckEk*pnf$vG3>0UD;X z?oljegwGGb6&0~$@2G{W{6zGt5$iY0Na~U z-=vGY>#Y`$QvNlmn2-&H)~&!9k0ND?=Ji$1YZw~6dIiQNBBJTsJci2oV&bEuJO*8I zJzo9kb*|B}V3qa$;rQ2|oPZM{%>)Hof~2x&s<>0HuFGbU@Z^Wwfj z|ByoG@`Zrj8uE^)pTNP!p#{6x(L*4vbnEFM0+OqlWmh>2`8RYF(N613TIm8|3+q(Y z-Gl8&N>)c<3WoG2Z;3U-))G`JBz0JOi1Z+y-OX2r*VTqvLXF~bS3%y)q?Zn3He@{^R>0j$u>5Vfpc;g!{`5)*65h(W znI!KGGs%)VEF5)R(wOQiNwYO7;XD;8QFl%ISvyd>c&Sa+!kC8P@uf^rKA1kj**}uV zN_+1jW537c)ERP2^aWO;QGRTL7M(vZY&DG=dkB_a9q%uI%UxzA5zW#PT2O$4O3k`( z6#t4u;3OAJrHfI~{L>H*^NzZv;-bZP?M>|u@U)1~(Vj4z5Xpib#TN(;>tQaHuUTNo zq3`i_5yPJT`+gA|;Rf4q82bJdHA~%RH^SDmHx^ca`jdCZgYH)e(R@qMOQUrM!L-FT z7JFfm10Z;I*94ycSLK`OR$zu&6W}jRaYWzL_aM@<87%oLFlwJG?if+I^KUO)>APFp z_iHpBMq(QHxy_;i!i?3i9o>VK?bRDK{ANMMYN}2qh6pSU$}iI`reTL#S?{?cDTeL> zC~MV(Y%0HM5J}yc4ekcVv4o`JAnOKsfqn1j9-q*~8xdD)u;M!6@F5K?0J})_ z7WUIhRR(;+)j_jq-K!iQjxIL8kBFH}DKyk$kA=bINzQ0cy%HWz@M3tRcPsM^PMhDE zHcJ~w8C0yO*O8Cdc{`9gGg+OW5Ep&)-LDH2#t7twB@$7dk2@z-ou$G2;SzZ;Eg-6Y z4lDU#qj*6hBz1I;O4AzaG2LRi#yCW|kRr$$DZW;=uUiSO!fF`YL^7HYmy0`21F*5$ 
zU5kiL=Xe6j3e+TYcI^Ge+mKfU-O)~;f+8yVD?6bLCzFWA+@Pm>>ZQT6;t``9)``Bb zb>jL;6w2q=JejFjXwliR^z_({DYH6uxyi{XK}9oCO;?$2cW|CuR7c*pbPM`NsaP)a ztErHqZ8IKbFOW*iz&ITsd+tlLp4Sifj%^k=eoN#MAKEhC2zF{r7$zugyb=eBPB8^2 zh?*%2uUiEo6LkNb6M85#ppC+^2R@>wW7q`hl7VwJ-QE-U)`(6r;zmeME|~KQ_HaH- z^dQn6na!}}ZJEaI^Su(w+> zJmcs7U00vmx8kys4S8gl06(b_jYt|(})e0jZ4GDc}^HKvvf9VIY z9ebWqzD((9SpObck+M0CP41V+<&E@DBATJ5Hm>!EyFls60e6IXeb#nE^Q6-1r=S=e z{QlSZK0na)wffnK3N|B%yXf_8r<#L;SCqG6fIj}Pk122o`oW{jP}Af^Z%Y|`d93r9 zBUF=|*`RI-g~f?5%2wf>uJ_IAY&yz6*|$-OzDPv8*)Su<#v(4&1PU4c^ac zK`>z^+IV59Fr=0R#uQrMS!)8S)Z?WaqtC0yuHN82G0}Fj0iTh!u)IC#P zKb=H9R$k9CqPlPp-WtY?!E{-)hRaL+f}+k3wo$DZYA}Zqyj2`O2!oMynAZ<>0n9>{ z3Zsx|DYEEha@2=00u3p8wl6{`)sy*MwHUgj3CNv@00C7CD*1QaP*w8Re#*^>U!$Rc zZLbg;><`^_@U>s7WVw1hWq-&P|3I^XZ=ceaohSVayg#XX3(FOM>uW}BnjB57H?z11Ndd$Rf8C8 zy_+G3YXRp0Gg_e+vK5nYs7{bLVs;Ksg$ciFjkF6Q(JRRk{RO0lO|YtdnxnTB4xkxV zWcyaQ3?H$Z+^&T3;A8=QzWVp|?mqlg=>ho)FJsX#2*8_MYdQ6U7f@CAm6s#xOUaSk z(}k3eyJ!?b_+2cu>svpcYUu+J%hnb)*$~9hclbS%=4Vgn^P~ zJdW#V4)_-)ygG1TSz=1`RyI;OmmYTxp(GelIDC09_Yar34(^}P-Ohz4?&NDYuF3lp zlHUh&Li`Mfxc|JQe*{gg|5s7dUE5e_RQVv0v||ui_38bs&KR*lni7JlC&zg}yq`Gt zcP=ih4A3aUtc#p{n2u9*z-VkYbxdvs*mKK?-`S)QnJ{G7ujgsiB-)DU*JTu>ensHo zj~A6WkI0Oi&0_Yiy0julOIF>rUNh7f{)634kRq)b5}6@yc-N)5_2gFgKc80cO__WA z`UgE9_RwFfH}Cy^%Y(S+&@)BsSTTaME>~3uycGgF3UW+r>D}lb??u2K(*f15$&Zz1 zW+S;e5ukPfrG}4_o&8G3$+-eY~Wm0tOGc37(IIDUd-aJ5;EsRi%2U+Zgj#P4y#tU1j8L9y4$7x)4Y66G;jQFie z-Uir(BKV@XE7ZylEA*DDw4im{+s8)?3COf!c-yrc`d+>@?dl{K<&!hRM{}c#FhI2} z_9z5-^rfbKQ^sOgCBfl_fT~WR=;nA(rZPk!s5#ywg;CoWBRX{Zr&}WREDyg1r}_%T zjc9tF*)+<3Gc*PgU%M!KV*k(qlA~FP15bCNz`5M@0gw9W1~V+`jC?^h7~d(iRQDNA zQVQD1%`_~8ARE-D7uVv3+`$}$u24lg{fzxQ;x(iKM->|Y@4L%Ar-%>9@d?V?+|;wJ1!wPR%{boL5k_`g>-ng~O* zHuYnPd)O2q7U}CwD`e3G>j~}n`jTA~b9FlS{dQsD&XbW(e80;%<|Va$(14t-=)gMA8Mp(b*~JlYNA65UC_&Dp-5R`|{NMip zJ&vj$)P`6``b9>%TYoZ&-1N>Hg?6kTxCSQ}luPId4@=gs8MYhlwbWKMo;6b7O@Mwq z)XhI}(jA`?Tv_h7(KU}{Yqp#p+T+ju5d4W1##d_hAu)YGb%?&x{F-fiNJc!CvAQD%@l;?*LVHF^#QbNQvBXO2Ft>q 
zlui_>>gw%^v&)8P<)zYfrugR0ALyd~qJiSQ=l$0sVd2Bj-hJM|f=gTJ`1W(876mzB zpa)yri03(Oh@#6AAnT4k+QCNNR=w??WlAs}InV&@^*N3A`NOevvtHMc86WA`UT;fd zkz?7W_X`VTq!FF^yg1zeQq+Rg#$j`fi>gE!FBGob*Y^#vJ_&EkI7c+;eGCEINHgt z3Rf*beWqRHG4y$oeFPJc{{|Hjl+NsiVHDuCoft9g$&^z$H+JAEOO_S>de1@;oNJ<@ z?4(hGCDpLy+(FFJfh4^D)wPP)G=JY*M;V0%QL9j2AM*v@!Va9#-i=Mwvzb8;RdVKT zc;xYJH#=mV_@9>aWT4Kp&4_#BQjS5KoU4H=VLrTkmp1uXFDOU#?WSBP zd(g8P8^^WIPeRyCvc-Q(A8?=Kw2P5>;ns1K!)G7yb~z+U&&NeG@=P(WF*p+ZBa#8C z518%6{<39`QGB3(Ou?qllmaW;u&uu4tgLYH9x$B(taC^|!aF>Hw9`Rn zo2?G;K(isjzv0ub%5^!M0^k{m^8xm40aGr9v$R+J^9re8hY;!9!;3RS01eyNx#zs{ z-O7~2Zx3|-Hc5ep#T)@x!ZxdPax-R#u+sIM+OYBO8K=;lbUW_MwK zpfr!O@v^XDQmXgX8B$)U^rgte>u;y0M$cPv`;2T}u=x_7kIeD; zAAUpqEdu2~Ch9;0JbmfV$h(85#@LtOa`TsiGYlJo(M!oH;LgkT0sI-@p+FZ+lrsWX zOi?2GN3`Ied4qoyKqB!|1|sF?hWHJvgP_SC4TU;nnZx{9^AYl#I?k^YMi^8Fcb1oe zBq~>+(Me@(D5DkAl&#He2U1miieAC%02ppy!7cs@5|AeBUHd-dg<^)hfN=eo`1Av1 zkl#`j8!&HG2)uX(GBv=f6oBl46ZDv|v`79L$W(YcgR4O%2)J8U(;BjE>X})%&`i0l)M-B5EvX2GeoKru(3@jusAvPrrp@(T&IcrGI zL_P4L6mkaUQLfoFG_dNj(q!)fJh|ZE$|S9`vo|dlsLzo>+7lpEn>`L9K)emcAZ=as z*uF^Vfmy%go7iEJ=PSQ)ZnaTpLh-@*1+)~-^9GK?eK3*6{u(hBIiU+cLEk9hTnsA_ z_AHmXv)e~da&^74T&RO}NHWII_xQI103oyr?W>MQpmpAKeCUM$u` ztMupnSJCoh!KE*f3KUm!pyWT8ib#BgLd!<@+xGLb z$JO2SX#Mg#+pU{Cb*eM8;Sq9IZ}itv9o&?LMcH)VTm%xZI*#tMdd_L8S%TEPJZ>1H13 zt0ZuYv^8{AzXQ$3H#={31-{-{GAmniig1`QO~=8*o^-1?lczhPU!1soni*YEB+631 zdRvpvk%wIPI|^s24Y!?goN^Y;lO;wsoGW}ZFGUp%wE;BTzDa4OJhFuK5aOLwF34&= z)zr?oABd-Kjq>P0*{Hh*^=_1L*pR^-CP-Xyxavp`wr8}jJ2W%KhToo~1L`{~KR7pp zP_>lEO=o(8P^!9%+tkLyUu;OP>_{Jov; zZm5x|C{g!Cvuh**$USj@F;`v$@D~#?Ul04 zgF9|>Hqkq-7ZUcb_D?^EI|uQ9aJI?ms`Xyst}i;Toz&b^@BH)F)*Uz*d;7-#L2(XY zNK4KewQKj~nzj_adzhFYp%in?Xye|DjvG0xAEla)2EowIeiq38 zmiKS$4)!Mi$r3zL);aYsq;01&v8hzZ! 
zx~N>P(7G3`))aTO8gVDazWV}n*sMe8xlZ0b`UEf0;U6#4#g$3gRVx2}$Mk9sk<-x1 z+q(}b>>JB#fenh`t1H*!h!cJ9$DY=Xd(y&uF$!eFTij~~sZv#pQ?BKJ*dHSB>^pF6 zDrlo}w8pbGiv#MD7K6^@H!Hc?wu~9{QW$?HlKkigz>i<=N0L%p>1c}bCO(7w@MQ^J zj$}s23pAZdz(-vM!{TF6=V8Ap*8oF6yuUB9@&S?|GE_1El*l(Tj*~YIeq-B!J-iHX zcJ3XlBj=kE$WGDoo=0~0tI6dU99tDeGh>rkGC`nHdr*p;Dd5wZL-*xQV%joH?J6=C zRkOI8e;UicNN1d8X-shq;=^{ZAKrdSHbbRh!d{baDA|t&?_!UDHfbvQxV;vI?o^B2 zn)bTK>%8n*eGP#C>brV!cbD>%LRoGfU`Qw0j<_;e=`Ji%Xq`E#CAGOnBecQM56N*X z`5dX~t!btJhIS1`^lZE}_bhOw(Kv#-D3b#%lP92EFpDFwO!lh*iFE^>Ad>S6n3K8c zV4k$S?SAn&dWC4ipN6$svnyKTu-3jGSyF%~Sgr%}ph4-{_B9 zpOMj;+RVpsV2XA|oFN-^E<&hRl~r-rz+6;alGR{v@i1jnfn4!Ecqj6)QC<&tPXK~+ zJXU_mL(neM|FM-$*&#!5-Pjt>_uyqK`~n#lME%#MyKMV1!2H9Z%O+ZF|D>7_@rkHk z@T?0MSP61qqeEK5T#6N;(dQV0Vy7upUtRUQiR=WeOs3uy*h`;eyf{z^@Mv-f!^k}^qRVMMNu3^ZH0sbwAJnoxYp%<%+JnF?e{+!%G zZk_f|z3DMHMnrS-GtSxmSAf#BdJ{7a`PGFa_=U{pwQC@Su$&^MN6rf*CJ131FrPN8eLv`r7lUqe7cXB8V z?n>W~mkM4K;oTZ=W5O5-u#K<1ITpdkQoQ^)tux_4KkT&nMhHnt@cxSjxu2G-(bX)@ zyWsOmAnN5C(xlIeTr6;W6~ZDCVgGEb#1G4?mBheojUZR&I+6<5 z-d+5hZ+VUW0?@B>(*BK$2g(nmZ=RG%llD5+!UZ{a+tFyRotgsL>GkfA3i|FSNHGh%bx%!#XiWoV%eHOXwr$(CZQHhO+qS!Gv&)`-W8#aM zi}N#f?#%UYWTMx+vWVOz>z)P}5a(6xz>=Uz4ZkVqetAh4^|YD@bn7+Bi<0Fik<_C0 z#bX65=VJ%kDSSjSI9Xy7CxgaSIK8elovKX{?r>0=GhT0E!JQxs&Pp%8LNCWUyHtk% z8~|CM_F|FZ`cVKo9=sb%$c=c~`Fw;YpeRM$6^t@c&F{w4tn#KM5HxlIPz*aSi^K|x z=$A|C?!xzEBeG-xPb&iQwyZo$K$;u{leGY8Y2rk-6um#B(rd$qA*Kc% zh$as+Wv`slm+vne)gmvbvI`eVKF=Mbq~}uqw&y~`9Q*N{UKWGT3*Zw-Fg?)NlvIQ5 zPcV?klL2^=vlAxl?MK>P352+SJo{7Q3mYf{g{OPpXF-^J3Qetl6aA)wq|ljkl*!h_ z@xF0IKd1Y4?p&XAR)PltFkC-wg}1?PM1HWqoiRcj+|Sq+Z)FTQ*#k)E1EzF?0l0Lq z9|?zLH{oVO5yo36e^p%n| z^Jfay=C5U+Z^r7t<5!(Q$(RqsFMbah4s(N5M51}s7+gw^tcL+mwfU(dFSTQU13B9R zWHfd$GW@jYTrtuRMhmQ`H4X=jt@m_kS$j6SY->J$7S2E-tjJPDcHwqP0VCPd=O{CE zb*${(Q@6=Iil8%=_YrHSgUmv2Mo`I;tNBHhXo$Ccy|cdfdg|wWjb&w2qvAetKMK{j zYy2)fDFM)4=CFd9%KbfBEvp4)2CsU!pHlY%^Gh)>Wo#*MI$vHBKvArGIHY&bt5iYB zVk-BThBw=b9lU>+g1)m{L%IGlu-qtvV}b!RjnnnF|QMn~XUsBE%xLvOk< 
z<*SjMGR5`rwz@;f@DZ}$Fv#<4q;6uM#`s4=-#<3dmihUOI@XR4A%;t!K2gFCnQI4M z%)MNuew%4);vpSxjiQz6|NeC+hGk*5XXV9Ff_u{zwLPh!ckSY<2*rT-FkQltm}dKa z0qekHu&MwlraFf!8WbxK_!OrsaV0Agap6{;hgtuL#v3QKJNgu~jg@sB-%^~Q%tiq? z|ugx9R04ZhF?i@QasM|=&lDpC5VB1cSxIjV<>!FuGgCq66 zHBh&G831e33hV{aX~&%3JtoF4ea~MGLN9og4kD!mieSIl0O^Hx!;V`#!;pvu zarL_q4sGeOP2l70cYt!|@T<5M3{ebI*S9RYcK_zP+eSS#IzBV2)3;i<*+leCdzaf? zxy`B)wv~C;7>)D=YoGMlK=xPqV`dI8kV@dEj$o;zv)OKvs~jZlIq9&?`%r^+(9A34hP;EhK2(mqBIXtlI3nUPH6D;V={jHCFWb zTr6`joat}AH=-cIqh@ZLP3HwK3Yx{T;gtwK6GTj|e(2_5)npA|nkJ{+owOLFzd;wo zNMK`$U;<3;*TCy9q70xMZUq35{U?+P6Z(|JhngnhqP);A&5br94M&Z7?7JSYHk@>Y z=7?F8Uro%rSe#&v1Qp|w%XEiv-nyg~tN`!4A4M$Fkp*0UVA%2LK1Fm*K#N0%{Pd85Nu%c$?Y zLmhmyw{AjJp^b+6IbAqBjOj!?*|Y0HI)dSx0PJR;n7r$qzW48ih=lJP+_0PYr~=-H zVh3IhYusSt>E;u`f@}=_fy}xp*>}S)X6B(yfddBN;|u{V&U(i{>0JWf)cJ>Vk}{4o zimEmie+V2wgjbB<)2pbt^-4e(%d;D`l7AtNJm?D^k!v#NhSJ;clf&(=gl+yW{3~X< zpXv8FJD9TPD!42QApwttQ)o{|9}#CB+tt1-_nx&T)Xu?3KlTU!$=Bm%C`Y;&^D5_@ zdp-p4;F++j#TIbo$I(ZeH$uaQ7I)B4*G<(m)Cr|1o1TJ=q{?p(sx6@dfjOav#DOjQ zmwF0j*+n(8z4)%)D*i6r@0}I)(o|Bqj8$N|Xrdtijr&jWv}f68Egm}7(Z&k-zlzWO zucuEinSJUNNj+`+-BAAdzFWe}0ZhW*PUW-U(ootQy5B$fy6a`s9M&ceXc6Aji)zQBnP%YbVgH1kJ; z>j0lV?8peF1fFp@(Dm6|quGXIgHYH7)l6&8p;J#iVo{2~-7-=|<{}69hl7{W-5ARo ze8C|2FPoTo&|!9ILji2vb4fbCLnnzpQt!&~N_LTkcjh%xpZ5<1+H zd$BrAc8pDd$D%wcorr7bB$qb&;j`?`H<<>HSNVe}SA+uF0N?ud=%jUME7I}jGJHt1 zay`FuHUY)aM4LuD`JX%ZiMG>ypHmVI2@M3~NCDNIxGFP&X#q@h)lLVbP;Rj8id+h0 z&Y4_kv5gbj+F8N}gW+Nbh1a?s|H{V556xvyue%9*F#_<$UD&Kq&ONw zdtAo{b%HOl*ouJ1XRX0tt07a0R%BXi zYcPLV!DY*nL#^(=+pHl=sCbYJb~SR>Ti8(_Jcsnr;1whBJF>{kkpV!1Scx?a;VSeguCSv_Os7;cJZ4Y` zH^0VDqkO{pVVH+%2alMzQ@*>4G9X2WK-`j?Kzn#^GLHq3@WP z=C8j=43+P1%UZU1vJwdAQ>NRv>8Kg44Et2@7h+q(-xt zvYVzTtO=$zI+W)eLyhohGteKxm`3lbV4HYpi|#+L$9j2SNGj&4SHyU>>YB(@Q>x0- zF@9SJ|9XKDs^E5T|1rQ5s83imzh%;>!l7RdaYj7211V}*hL8*EixzD{jir#Mh06y> zd3A2~Cy4k+XPp!okWS3@ZYzUb+4&MWGur^=1c$&No$u~l#m?t!4KHj-^8Oj&ZJEtE z?+d1&by*0Td<@Q$FQ5y(J_zVj*cQ{}0^qp68Z8?70pp zjfk!UrP$a03Rh8Cs43F}AoYR1EWG3IFnX_oWTrUu_rGpZ9PonaHzq+?|05yf|KgR6 
z|MZ1S1AQCL-H6A-5z8TfLC<>Kf~kgctwK?=QCr>&qy${mntUo*(sNTxgF1CshK0xL z!eWbipfXu@%}zuBCq$|8(hgSR8vGZ(?Ra7-UH4fxXP|mi527FKM*q#B(9v3Uc*COi zgcCf~?PT`1h8;deME6q8bq#?H2GPX z#&3soJuVohWhW@hBS-*ziw0r$i_H!1S#!%tZI=PX?and2dO*l*2=@=@Ysq(MO5J zWb^`!lCMFPxohN8DCJvUOxkr`t?SN$hJ~J|%zXBM9n)K|B+!=Dz?1h~JnrZ;&Gmph zTf8g?(wvD6WU+Ta^D*eLH34YhHUq2_+-6|qT@Qjp1UwnPMyqQs!DC0?-{?Llrh~k9^D0nUSB~X z_ic%+umi`^%F3eeC11YHjh zdja0o@FHmhYvJb%#XMb~5!2>-xMXIkhriZjUbAudD{T;hD&+WW>z=;=#nPu7dM40E zv#&J@9ASG|<_fMvEr5^Z!TsXWsI1HbT7UOqHYj;@Tpd6~f0SAT+s&_3ATLj~ejFvzJHUykdxhaRABH3*mo?^X%S`%%4Eg>n`SI4y z6$UMiIuA#1z5BBHY=WCr4)~)mAZAUF-mik>ez+9ob9BeGm@Ar9Z=OK~J(fJG_hM{SWv=J$sQQwu8*7-t;=de?(MWVj_`$016`gPm}tr z=ob?MW{LI`?@t@<6b zjUcEooX$UW?whs_KyROGULOIUmfGuRwzb)KB(Xr3tb7$L2X6+R zkKH34OJ^7z$*Mx}V_&y2%t0#;?}a)d!0lr>exZD2%x%3ap;ulW8uXp)>}bd~#+O^Q z`ANO1qKWd9N^X?fmv;TIAioer{Cy88ehD^P1gu?P=TK#RPyi^XkAcLt8m{m76hVSC zMAtAT<*2YW!kq1j- z%{A>rZ4X~`ucWea4sH31p{tt)RmPyW>wS*cSf5nWXWMB-i0E(VZQus}exo%F*7mp$aJ2I1X#SHjO?*zi~42!Tma-U>`!AB&|*x9SmepZ2sCCc z_<-P&+yW-ZVrS7{F34Syn!-ys4cL8jWeRalI+`QCBt4^#_3_ctm5QO&{Dd^fgdd2l zkS>k+S}YW(d=$U^2SZ5O^`RZpwRve0%I^U2VK$r=Yzek z{|$Cq%fE%10Xk3SgPhGYtte70i46ygl?OkA<+**BNzYnA_&`R?;m?vkEu$twR)5vf zRN0jsqxOaL|6G|Fq7T@BH|NKMK+YmD{{KdM$9_lGSEP;6(={Le9>a=V#s(wzJuEg4 z+E2w;1_)vq7d^zivh(~vaR6z$ccZ{tN9Sk%i?LrNlL%M)glfB*Q}X~&edY78dCP9N zIl1X?4Up}NR3!n(#cJaebWyfy<2*G<(28kmu(BX49HKU?>h6A(bdOqXV+9!Iqw1J# z8+(%w+t%;br$?h8lIf85|Fz}|kxCQ1dX$#zHgbA+v%z|fu~B>x#@&DWX&Z*b`kcBQ z_~hqIn8qZXS6rTDj0e-oP5t^=tYP6{+Mzp7y8W*V^ z(zz}|;y<#Z>dF|0rLa2gUZPJkKlB%DR`_qnC7`J|(1#N!dmAN9!)KMEzQxfZ*TXrO z|9LI06P;!ah-b==a~8F5e-Rr`xqLa9#g`i{7JBFK2p}^M-1;3jD(P$|M?jHWUG0No z>wt@nfV6RbPhAB<6q)1_|ESW`nwNnDq`Ac)Js?sz*efnq4rL3aw(7Kuy^&d*iF~d| zHJE+MEqF}Mk|F}1e#e>ilPSw{&!Y#XH=y~Jgq6FKrdQJv%fg78w%zRhQmY63YS16X zE&>W1RUF-J)~+atzrz#B6~YVm zI!Di*D6xZx0qDdffk#+E2}Hsjyp=Ew&%heFvH^z0!w)-+ugxqMaF}VwLmgZi#+*2b z7H#Mp3*$iPGMgc$%j+H@%OLL)*tIP&v=a$EWcyu0>Bnqlu z-m2`2qNw#dO}UpdDEx%8;DUzq7Wr_~)fyqPU8#_jqD{zVoWKBG+kNM3V>O}CiQe^X 
zF2#iY3gju+ZT(~w2D)(=-HQS{oGLUu`J-dOk^dN~=g??UN1^b5bG7_xEMmQ0Z*@ERSn)CY3c@0=dn%@GO46;`EF{;o3(kvQYkhL zLS<9ztwoPJ0Cif*e#PvuVVn|>%Aw~B#Xg?5y#Gd8D*Ry4SoV)uI#*0_3*J{A*H!Ql z^HcEzFp8uhmq2$H3Oh=zJpnvf$;6dROm+Bsa&PN|0u0Jepm83daVL-YXxP0#h5@bU z!5(4rG2njZAx;0&?Xfx@wWK*ad%>pC9bPFxr2&%fn39F{(~Eqn{*iaxFYp?lpU@Yk zA5b5cLIV{>2-r=ugKbezT8a=kf%6k_;%g!HR&Q@m z@I`u;6-({CR<(Vtv_-}o_K-`OU1P47Y?)3#RXgQgZlKj~FwERnH zo9`qnxg|ZPv|6usjDbR-w>ub3PUDE&h#0?E6fC19>)`A6fV09x7hAZ5Gz1jL25-<% zg-FCgS^%ooYxf1SMxrc-w%5{ad_Ofv ztjNq+>#vRZ=hk>yLwkk1T=1lbVj=&1&q8F4Uf~>1*M%qfQu$e%WpYA)tbF1is$~+3m zp*3LsxBp+v4N=)UV5*+1;Uy$3Vx)M3XI)EESiV`(V@|j+KtF&{dwQ^Cz}c_ch~Kk$ z+<5NQb|GKOlalXE?8^7ddVd+n9<%6SSimZYC%7;Lyl0Q3OIq-DIpizBj}##is2A$o z?>`EPl;^=K%}vz5{3R*RpC23&hUtZKtCX}Yo19y|y1Race7mpfEl-!~Y_W}xY$j}J zvj-zq3D1)>7WPQAn$3?exjt2nk>~(DM0|p%C~6+f2b*L*-&xKXrxoWUxma;c8K;@2 zV6yWhw>#=Ewnmt6oy*}e@Wwe&ax<{!;?*~r7AmWPDOB7n~jNB`lTbgCSYGr+FYoiCP_WJwz}8 zpW15|Vu0LlzJ=9s*xa&BH7(#po1Q>5@==i|b-g3Yfo?rBhbp}=6Id_XdnIo_uWWH& zMRYqMl$dNUqR^)dRT`sxL22gLjYxm%h>m)M)RXx$>`P-1>@u3v4k?+k%>&AKSJZ%Z z7KULnscy^e+MnMd|0>AL=C-u zz~g}Xv#{sn1RSZ#u}6zXPW#f>zoM5fWR2AOu)RM~+>)yZ4~xr!wh}H?(_D z_dq1d4B_zot0VnzrMcY$Q!<*DYDflH&2<07kr~)D7FUdVh0xOp^UPCbTz5shBFiQl{i^!oJu7Tb+Ft}vB=ss zJ>?EkCRU(sQ6_2JX3BFu`Jnw9?61bX8KFdi`X3ab!`0Fslah9M!mXM3G)u3kacw-y zzHE2o(5^Co^6ZH=Dr+;*0|ah1Qwy%8S0lcIAO)by!U z*&tSUTmA8sYFns>9-aFb7-`o-nsc3{ zbtLaMr!60SjBX~q!iYTY0D@d(aiw&W(J>7Al3DC(fr9zo0k`SS^LLn$nnPKYUgpT- z*mTQLl@Ihmh*&|WtDG7h95K{*hFGum&#Ya7s8OWI0b6DX6H2l?ppsuZX?f9}IN%q3 z$oh{Yp-Gdze`!Y$4szhXN|tj=?rKXf1}!V=fXW%J`&HFc=%?hfMi$GPW~$i>uJU@Q zyrcmf*tdS4Bqj^O>}bjnVgJ(9b+*KjjM9HiiOvdCqTknxEZ>_W@sKxxML6A&+xAPx z(1^-U1n|qma|al*BF7`x(=k*f;#yTo1C~7PMZgLAAa40XwKTm=!T)tbH!#?J@;N1U zxZfY-4F{IS5t*ps!~4Auqw=yfJe1vx_ZxQ5GCCKF+`XpO)b|{P?z{08wcz?`;wgeq zkQAKiP)E!ah=cAf^Az`LLZ@BS+}=ExXEcC~d~DbH9QX%XoEd&fS<~^Z$1Y`va}{S) zZV$RJB|*3;vdz7}Co>~^c5f>bp~8TFrp+mw31cG>V0i8abl@27n@8YGAk+ky#8Nfk z9l2Kc3Ae@)q;91F_v8^kk!ZM+x)br2fed*B0_b3w?qJ`3>}lmF0Y 
zZ(36Qv&iuo&hRP@pI|fLY>n1>57uWEE1ml({ZA``@PLa0M=bi1h^OmpGB@B*Huh<7 zd1O{)XG(oaTA6Y;s?wiWj+EqHX=U{G_ZS0Wu3OXRzv!`}lB(#mp=sILp~VWtQXxx! zA}xOOY?aZQ3n8<_YQORqa+n|CC3Bv)PMq4hvdM=huMvhC3Dc}CD*0*_d$e1443&Jk zL(Bz4Nxh#~6UF&M`HzZ9xP6Avw}fT_KQp`?ITa!#>UtICc@CFn#S#@?tKh7>*4Af-jyyU}~AXBm( z!aYN9cjP1DV_bmn2OIAccp)en-S+IfM9+^_yCaxPi;li7s(Ho{_Mo4+TG&u;!{SQd z%Xn%4GZUHHjv)^@s55cnrK6`@elJI(y#M%Kb#uR35{i;;&vxeS-W)@OQkD+&k2N{o zkL8%R9rWQ$zmXAc1qI-jHnRT*{)65L?}fLqV6`J2_uule^x=ni0lI!0wds*2agzmw zfdey=w!Z0EK`LJ>_~cR_&clAFtP5Ds+*D_uXu70n=TR)vZEleY=U6cEzn`z^hH5FIFeD*MpTwCoPS(3itxlJJ z`NJ1mj(BI&e9`&`l4ejmWz74Jl@E)C(xk&y)8+z^QfJ>My*)=Nn6R6l#upy_k3EvubLMR#6qpu&Ozn*Y`#xMD8LU(YcjebV!oJ7r5MMlAMw-lG5E%YQL zSxKn`_x%G4I|udZYSUXkH@@=apQXH@jfcPmMM&<7cb}#nfWVf)u1@a4WtrzrKQ#*4 z+@cnmv8^3*n_dLJS?gsQ6&!l{+K1Hxp>U73r4W3*xlp{7vH{3jHb;9Z9sJXqo_htM z6QU`Ab&M!8!HrGwXELmK)je!+YRBBjL2`JjBd|OINxX#$BgvNea290TXz^wT8>$0y zc&v01PvW?-o z2)=;0yK6u;9~j^b;g`)k`2#R?rqQ6SC-7D*ZOzcKw@sp|E^Kz0Fz|>IOpmD1>zc__fWVJSsa>@_ea=>flD$yn z7qG%Gk@0Puq=dCI4IQXx8tT?#5=em8)o4L#ZOjDid}R=H+ciiHg+zdzLz<19GGLD% z0&|C2-bmM<&n5=KouP7mVRqQm6zZz$JjJa1M4y(kf=zcL@G|TYl?T4z@9vG&)~yt zJX95OR$U_R9U=atMtEK35;TuEAsV>#>g0@FLu}<bxfJ*s%ZBAnWi%^T3<-CX6ULe(DSi z7A|TlvEz&~13D9 zT5-v2(iC*c@1wclAPu0@(T3mgZId8Lza}IM(|B;`-4tb2eHM`lNk!y$r(@n2a?630=&ZN5ngwsdk0GUel5eIGDp>=*0fgj$t{Pu&V9? zc$>HvD6{;l2u>lr$IN5j-1ih9xDI%m&-lw^VCkz=0;n!N_BPl=mfu4)$i#lQ0@H!! 
zC3F-dkJ-+Z^SR1Ld0}8BGIbge{7@&iaWmYodb7fcS<4dK0n-m!^eL|VyCTzosJML# zkRZy|;Gh0la7&$d){{sB@tZv}>y!KgHd`-(2cMmwH#|I&M5k;PQVUTJ7pI~7plO+p zy7t~hb9Fv_LfS;aHwZ+ItN*#cI_agyYe-H=)kOLf>MPocZYsOoO6>`(9zGR9pgf3q zbB9m)w`zj%NtNyp$a~b-$~jus^T|E)NbxMXic|#?nV3mMg+(4s7v*Qea~2X9mM1${ zvN!agKokB3-?KVoCGLw*irA0+$pqfF&e+KCcF5*PqZ}#nItHgocP{*dL!C{o@0mY< znx0w9$IV%|u`R0C`LS>rUY~-qfgp6^{xP|+!7TrxsX1hD zP%(?(8l%nYHKG6y=bj*!=Ai1`5^OpTEGo)RI7tCNEXEnkhk1|BPqPJ0a|8E*?F>@r z$Z1$?9JcBV*AN&rBt@fAnRPVVD0wcF9i91g!kpyLF}gv1Mc&}J7G7hZc2rTi=>Z)5 zfcLmn8upZb94eIi$gQNG_&5i8!slNFl8X#&UtW6S|e%g%l2HH)s$e8>F$ zFT2dwdVxh5db!GVv|w?vBIFMCEQgLmzhfsRR z0BWrV1!Ww{FI~xzKG|SJ$1g|VRe~H^d1a|H$q>!d?+GV~u7;R;fqSkjLdB9D)!y9O zqbdl{bp2{~UIA8#O7DpG+j!66K<9vkFvN*w3@^ztRn24WN-&xed|Ho>RuG@YhV;<% zG*|NkmX$Z3Owim@?(kDp`7t{elqL#xzQcWY|J>r%k6U$fL{Ok6P4s#7HFEU0fk@>S z|JQ*Tm%hbS5=*h*$0<0~PfD{}mMkVN4a<)7tUNr(syV00t9RT=p3|qELnoj!qQs%6 z*Ampw>G31eiXrk0Q};H>uy(HVk12`XS1qCVnsrWp#Yf@KM8IMIvl||G&wzc>FOlWT zYOZeZX-)V*m&O|6P!dRt(-2b5k~l6?num;mtVB^`C2%-Obo{{*x40T^pwkOhYg3$n z{U0bpQi4TNtg5epkD=ESO*eV!pA-@^zCbwnOWd0C#9!G56}evXC_s{Qwwf!Xe|kuY zueevaq2Jys>KznBpx>I%KYR=l6H#QeSKaZ2+z`Y}Nt=;5{ahICbXqc2Y@>iee}s)^ z3`gM=;CNw7#-pMGMKtv_3?7w(jcb_K0g4K{9EO#4TJEQ{=gsGv%!>p}U(B{&r$n9N zf`sY|=GS47a&F%b`ZKPt_Di{~MsIR_#rqZj3_B}TOM~;}XuKs~l027Hm#^VsX8YYd z>g1_eVa`5;WG8YvFro!;q;iFWwR?*Fd|2+NTRjkyfY~YN+V%k`DIthOI>S zOQ)`X!ji|4(7>|YI-*659%^C!gD35sPP|jyq{tKL8txUSRy)_&)=-PBI?G-yXSP3! 
z0Jl~j28VTt)V~H>`^QM(8O-=B3y)8!7wo=}qrff@Nyklrwit^s8?emwspH~01;y5; z53(6WStx_y1Y#>>z!FWhZM_$*h;R|*Jd}_NR~deJVoc55CA&|;24oF5IA~xr%OPup zLiC@G-9jxo3bItndoKf+jLqn~aCpa-O0ZjZwY(=={Ve~gAL!pSB`2IR!8=GZX#T+` zZ=9AN3|*+suz%1h9!tZ-q#wQpJsuRmegV+-Bg$675lE0e^k)crbDfKzNpn>xcSBB_JHD|W;HY(gA=o>Ru;wIlAf2J3R&*&Tme-yS=_CsBF^@xr{SG>c7nle^3f!hx?i(U?c79FS zafikX5b3}i@G0*(!&H>=Hp1`3idqtU2$K+;ty|G-=Sn=;6UNxJqJ7)_884TkBG`VF z+hjN>p!D`6`*=RJaMYyG=(NTDT2#NsyL)cHKJd$z+YItSQirRfd9(OzG=_!czgWr-@60D=`pW zHZifK8Kt1RW^B{vhWsbvi}BD5B)?h4hcM)*6o>QoXZiZmioH8?D!TUR@~)1@5(#nH z;%iX&9Sd~<;UY#3xlJa&Ej}p zy9!wynh_$D?T{T`{Tn!X8Lr#}VIj7OAQ~7YrWw}Timz2r8F;)!xRrzoRZ_8{!)~!o z)|y$iFnRa*u(J%lZ=91#;%8^Szoy2pljUoPFg+3YvJ_P%{`~&8+2NGwFqCJ~N$|JIYbU{*o zw-Y5ZV>8~aahP&e#Mo20yqMr_FR+M|@O6LD8v}Ru?s2+|Xn>ESqv&ZxCXn~Ot^04r z({hXnPEq@-e1|na%zmCBGm~5GFzfw{_xK(wWn+>{##TtL2JxJ)HG;?}AM}Eo*+*sX z;nTb=*EWY`{y}o1j)E1fn#c)OU~M}w0%qH9bl-^qs4Eq0zCm|^(hIzBkkB1DU{35o zx{WceJ;DedrVMt8JY%})VI&i}GOxV4RpVARUXGq$laiuLU@()&D8uo>S=lyE)63IL zc;lMIe%LOPz1vy}MyDm@bS&^~OUYHh|$sEzL|AiuusT)TCk)Z&J5?6b1 znfV-izg&lvyz40sKlh(YAnTTN%e$aWGZ>sk45oeUC z@Esvln-=vLfV|e^r1s-qRI6lxS=A)`RBr(Q)xHqk3i0U@@)zwi6QC|#2Wgw6L&R|-Y0)b4V| zKn-7ZcoR)gkDMSRy}H-M0l?vhF!Snpkzb4Tv`1iLIM`=F<90Y!F%m>SB_dc}40%?HH3J>>@cRvIi}b>jP}q5Ip!OmfpEL z8n_aE`2J$63!G44oc$OfnFas$j;`y3$YvEJDlPV&BO`$d3%XgR){kU%3dwtyO7sL%6J zj35FWhuvg_j7r4)rg?(b2W62cp3--n+y$7ZE=xG_uVNSNmYyD}q!e0NWLAgCcKUfN zRX>2@%=W%uH(xjpVV?dfA8tTVTB)4RuWcW+0a7Lm)U1#1cuJz+?w~e>4#A3VgzBC9 zx5h<-!TNH;P4!m9rQ2GDn=`4jae0TKrP8jv{$37=K9xB5>N-HSURA*bQEfr)6<0ma zE~Ow~wv)%Y@&wpMn5;W8f?M|$a8DJRvt>sah^ajQGU9q<%MSc_A$1x7XVNtXG=DcL zJ|{Z>fkpzBCn5Hm4KYb1&DAlqBYQl;RZvdKzq3YuwqQokL586xJE3_#a}d2{Xeo*E zeM$>Jt&%&0ohx9EkszqsL(9wY-zbl4bG9oxDEr3H;>~#!Q6tAJmgTWP$6gM>lP+q& zoc((EA!rrxUPpo)zztq86N~mj&+y0=FtGY4vwOapK@+?`d2Kz)8HVlPLpSZtiK37*1_+BHtM@=I3NQ*!Zp*s(N5}UqD0@7> zFpiR$zEy*wV%^k9s}CqU*-58g75L&-icWeqO2_ZP8GQ}Yn-v#bOZ2y3EJ=RoqN!klYR6-LIFm!1$$u1 zh42@MA9{Dm+mrJo936m;wYa$>NRVs%?YW`MmSY83P{Qgpo+5MyH<;CKO%#p+BYqiW zLA&ercxJMr^Dco&YyM$iJa4 
zH^mQJN0J#}XXS@Zq>|d5U6vl2`k@r}_|s6$De;mZPY>hVX_*sa_WQAaEQ=(VlUL8oJC&_={OhrAUHee48)WF z{T1&A30~RIA6J<74=gYQ$AQ-#S-8K?fg*+$6 zn50)FOz_1Fxe}9q!%lX&D`4X?he~z+jSIgi;9?ZS=|MGp@sZa5qb56o)f50QLm?x3 z0-JA)XMkpMysugdwk(9RLX23NYcO%N{voCeX(L`_FZ-OX*74hT&fCm$<-fi|PrszR z?|D)r8-I(8`I&*tm-rYRUCvA|x87&`(qtX;$)A{u=IOOckUI+_Pr`rMe z$V-4NgflqN_auv|y>P69ko&+xaNW%^2LAd7q-&B-@%X-0cc&@fMXQ)p+Tf${C=GCz?ae3kSx z-W6p)*wG+Sp7tOqd7XD1aUi8f(v}d~-jp{euaHD5PmSC_VhaU zp*5c0Ke@4jTc%71$(98L#+F|sPu#;2DO*$g)Xu`K6t*9&VDe!LM@4BD`ytKYmM`gl zAH}D2xpHsp(ktRhF;B0LHrzhn@4-eJ-}N=Xh6yQvxX z-iN%fiG?J17{b#Y3F?F?oR0Z#06})XOy(W{g9SW4HYmj{LO=RhPk9W6EARJBm#{ZZerkEQJ59t~1=UpkX=WscV-ri#N znfYAk;j%~Hh2fA^^@nbH|0WJWhx|VPVnCh0M40K_C*<-yzw92Wn*gEqI`iNy-1K;? zFLn<(Wi?#(q(6oGwp{gNDtN>9JnNm!x_j;Y++=z@LH$2Lz^|Ye=16NYo=e``4r*wX z6KS>;HPF#<-Kr~cb}8OxxOtaL0C(zDEj{_vv$C;8qe}29BV2nAhk6ua1|5Yml1>ph z?SHkI=v7Ix6pR9$g|iZb+S?O7b~h*6v>Tf^fiwAJQ)H0?CUwVVIPUCvjO`Cn1y6SE z6PVu)v>b+%Y^2nElnt2`}Oioah<`t8JU6mZi~aG6>h)DNnL$ z41i~>`Qi7$+)tkBBM`zD2|7xg`G*N@j`WS<=hK()H`bl|4X{!5>DxU>6l`Ts_A1wi z*QITO;Y#sn23Y2sX??h~8+On;NlPX7>BvEOx|;ZO+-y)c3dr94qRgv@xd4HYo(Ed< zHrCrQhSwK3ic#TzP88#LGWDkt|}*-0)~=ywcwThmobfx!@DTlRpsi>tnN1e(;HDJ$1#4uq)AarsOx^HTS&; zk%saAYsaSju6q_~ac7Pqpc}e;=c=E=+bnD)8Ml}<0%qt=A3LKa;Hhc$&hou3AEgfz z*EcrC0eeQTm7jiE`%R^i-oqQ5G4|S9&ttx%R}cghHQInB)(F%`(7o`?Pt!=}H40T1 zgj`3;pmHDhic42VCWuIX1fvCJNCVjM^E1#;Q7WMuM35g;4XEzLp`ZH>llHcIO$`X> zlAUlA35K=K5Upv?p1s}eg+UVaJUAZJ0crQpO>%rU_1A%9Y^`*Q&W)St=W&fEp`F)B z7SqfWm%p6UR@363rTPc+7${I-OX$M1w7xUj#n;vwt0467;8f6h{QYICrI|kaSL92l zh*tu39MEWYeKdNDW;o_N1+fuU?ncA$g}@aSzws0EE8g%RD%8gkQF7-WX`n#iQJuVS~N(#%13&whyCW}&R@AEzocIROmKHj9G4H>D%h(%f)P;wD6?cWJ~j$Eqr50)BJ$=Tu=ETn%Hi!} zq<8Sz-sP;lyF)c=#U>qm+QNT+c!h{!_HP^Av2M6g~aPt{;>E*VD%pKq+_72Zh3ZJ1!T)cSrt z={fG{+K~4e8$F-J6?suN*=gz2E{~tm)S$r!nCq)mjTKV#Fvo%hA9CTKVT-HH0usaF zTle4S7feX16(fv>D9T^vBJ2#rZ3K6iIvGslk`tj%AK;odhxG8MhvH;Lm_m+Fn|EO> zLpYG<@qJ00*lTKZ9rqQlr1VYp9AG}shxz-1y)Ns?NnssEVu9y|Xy-H!Pr zG84n~ar1N~5_Ab#BJ@Kk*3J(6%KVS9hOM@|Z_Et( 
zg5_2iTxyci+_ayAm|>=a^-CBaaH={&76a_TVm&f7JlAMS8x9lAE=yp<(F+$mTgPF4 z1%P$qj>@9@2|!OZACA&5jbeHblcV{wfLVh2>+Np)$8Zgc zY!5tVXjWWmgg;wf?mJ)XJPNj_Dex69gTQ5ID^9ltNmq$XRigN;#?G6Fg- zseKDc?2i#WfCz%xvP9HE&#-FkMTHJ7jL~01A)3<+aqsZwN|)13vRfR7tGkq4q$pqn zvv|nEXIhz}WZ3cUCOEZA3Z`eioaFH`VUfVN3v{b}jUL3`nbf4GQVPYq%}n)%g~VJ_ zw!`Ne>*sNJq~2~&BBg?wBK_ET4c!8I>EF+)yUvqmIQ5k5P5pgZ zK8RP!yWe(yFONk|35~?=gW2v{BAunVfAEGFK{0e9PNG_&Sr(4FEH`$xOawOZ_J7X# z*(}&W;vW7h3yM_>LS*~v6#sRBYL!s8UYuJ~EBW~xe!g<*qN(aKMI#Cb5euVT=XRdN zxhCC&Y+8>a4yVdfLxV#i=N8qfET9zX?J*#kZWmy;xHjV1ptYUHF)BRK2{QbDqID>- zh4&Fe6mjjBAR@HbOz;Q6*|F40SOE*@Q7Ig;4p^mF)RVQoyT}$X zY{v>`U32uCk_#ib(m*LZZUiN)vg$2cuQ21J#ul+F4Ipdd ztg5$u^r0om;0<7*wst&`3^O`?E*+x(>7--JCMSq->K=%D;t=ZbhS=#MliPM>Vwt`J zxr`qr>85e+BJ>!e7=c9MDGN`XuboK{&j2`_8l>e;MEt4yr&xEwKIR|fK?_J#bXn^P zz%9~c%TwpEiEHo!suk0B_w+L_x?Bju=Hble+xiTZWd*j{V7REG-wzBgyZIn*b64-! zE>sZg$xLoC_F7)V$sp<{- z9kjN_Z8rDJcH)D(OFH0M67bS9Xv57dfZ0KX1if#(5<5Qn>`TM9q!^ivlz7-4L0awh z7{U$dFa1}!8JRSikuRa>KYZ56JYz22Ow~g$#yNivbcRw@E$xH$2g{%ZS~DLjA?57Q zKjr#U8L{k6Bg%9eeDKsNPIQZkY@qlR1eB-6C7F0Oh|RPi8qq-IB02rvT-J^CqN)ow zOuFS>@w6M-0oQkihr%qklMtx@3#$Q&6@#o3b&<6aFAL6@U7b$kh6}1A&4iw(PC8)O ze(3;%bE(q6Dr4g0A3l{pTV+>3p*q4{U$cDCjAn9rH=|gwXg|FNJ=OG4Xq6y4ixnw5 zWkf)LwkqA+ru|3ODu5m}|?vVEJm7@h5ukgr8_fP;4(Uab(0=d;vPD!G7UH{f`8e;JYnv@@^;$vA!mkw99 zh@H^~_JY~mP>|R$^T^Pl)AJ!K(@|uh*LvyAHnTPbnf(v!HG$%Fyp-(gdr^Lf_7ZD~ zIWM1N2OBSZQ5)^U&v#O9^`HG<8et=(jj%Op6~1gVvQ<1TLW6;gO41$Cn@LhyNvE%5 z_EOzL+Q!2yl}M)dV0s@(%4W+IB|N3CV`6!zuq1U9LsZ;fVC+<7FXlxZx`M&Pm@&@EYgg7!@HBPxyBSYc8GZurP5&qxL<3x zfk2FryxRZ{L&f5skpB0yya?<)9~KCjY)xnC08pbr1aFcWa^z)+%n0g-?$6&gxEZi> z2E;(*Rl7`ikNfdW7^22g?^55@yVD+Xl(|^0aRO0W8T*FS86DL=lmWi6%E+1%X%= z0-^@}dAjPbT+ds9*__i&6f9tQQEJJ{PD{bp!X%21Zd!Jg{+kLEy>Qi`NeucsOnnQ7 zl;C*(6{HoPWK77|oYb>kmX6*_jV{4Ic%1R1s_Q7DX|!OE+c!kYZBgc?ssO~&6_asaoy%Qv{Z<=kQr*t;iNllNUOv3;y&2? 
zHM%qPXo!vhr~qK*{>$X~lKBjiEv{l`z!|yT8WQ-DD6`w?VRV=5oDHK$1~ZVgsRt8c zn+EO)6+M`)lQ%uAhZ24r7x)D13>-$XM7tBV-99x|HV#dtVT8VxchgjSPbXz&Qa47R z%X{B6Mhx%`HNpu)b1B$-uP{?jQQ zZ>VTS8ATdB2`Km?gkoU0G}%oZS+HDzzAD+BKGL11LHIKm=Vc(j;YP_9 z;z}>7)rDwi64n7@@l|B?8rW_!TmHO_MVw*~z(C z7D2&XS60kY%AVb1a(mOhmu*R7yp3F1ch469GBkNUmY+TS@Hrl6YqX1m!_@9NeY6sPvM=?IF50(KC8_4U(0j5RU2{6N`|s$d!Ef z05xHD{dU-~2v)>)`}z>k9*3qK{xJiu4X>7dLg^?YNaF^PIkXh}L2>G2RTllR?E=5E z;WXiT=~hu-iCn1F!5PWsNr|6m?vz^DVft{)@~KviI!FImBLg0L%C9;O7KMeEv+4HI zPb@^nTw)K%0lA=yhM+X{!gZjWW|@ALOr!jM%IP7B zkz3u{bKKZu(4UyYyI)W4$|{d2?o*7|dp0ONJdcW}-nr1hrW|YO>L@Gm!}5*(CcW{C^r@BczS9NKW;6 zz;0>$d57b2e&A_^;Z>U7YnX<;kBr|rQ0@I}>LAc$ka0vSDHiL_Ury(K<7)UOpu*`m zoGXjC?cD`hH~c+XZ!7WuvLS~a0?k**I9npB2g>=ayD-Nz z9S0aA@SbA$+xCqIPL|e!W*rH264UxKk}`kU&U;(OBi;n4DdA_UT3>j7FrDN*OSxS4 z1Bb>Z2UVlKa6w3j|kpRwGmKFqJmxe+|Ys4%s~)$Mcg z_t9CzTM{T~Bk2b@Ha{;DOSD(gM2?^$iuh|z)ThgEg?B@9is>~Od1i{1jHThiuI?ET z?8NsFHf`eTf?~P&g)xTaX$t83$bD2uiuQV(QAc*BDkM3#FW*9iIQc>7hrT3YQ=<`on-g*nDb zV5u#yjoMFn2@tw%e>vo{`OXg|Kem7kU2C^f&$%=ElLM?7IyhK>d! zBT^zjqlg!sU*IXV<xzA{J7p}mqr*S_ox_9-o~?OxU)rV{l= zSN18R`=!5IKi7Idi-BTKbR>fmppBalbMMR8IOeK23Yb^tGNLr)0z+|4LC(j|x(E|c zWO!Y&R|oVUCCIROF`<&0NEsk7JZ~{h#<3Q8=@zQJTt;{(?7ArxN*>)~;lBt+iGro? 
z1Mt$g-d{zo&KsVUqKTRkWvptG zOW&(4N@?Hg%@jQy`Ma-yDXV}Z!?w&Uw+X5sI9NvOH!YHAkWD=bUy_K#5J+;6fQyLv zLLE?JhOCvw&6yunc=&JYCBdw-lo&rT*Mo5yiz=I{bYG~ItHtr~N>(nOW zdnt(eMFv2Ru&R!q$2X*>E=+t-5E-k>brI3 zFTXGcOK=-GW?xJ66?n_rb}~{GtxDP()o^puS)6d&;3(MbGpc@Wg-!m&+wq-FmwE@J zB8Q^e)GB!8fDBSHSp*c67Q9}6Y{Qy@h^T{@IS3Y`gU5U=c!#rLDSF}2#=g$^kJ@XFSSkeWiH<*i@lQ$s#A3qrmmD^3QF{* za03enRMzDm8+|8`QTn(*Y7CV&91Vb8=6s0S;3`I~u^_2h0xTV(k!FDkytYueP}Rh> zKvumpn0`?YI=HzeGnqc65#(Y_aunje-{-lOQE~qw^B3ybA$8YWiVX#=Nj__*Ta z)Goc$GV?YRmOK<#6Ps&+vtW{AFyBn{wx+0KUU$k_{De=j+NOU!0m3vmRx!Hgi#35O zxID|f)Nie;qH9z$(@ZMs2VgOT9-u6ahr)Q@H81Di6%9$+#0LDeBjX3w^XT>s%Q=t0pd3A`yoc`wJoGQtBao4bZK)rvA!n5LNRg>I;DV%{um-~ zGjaNSphp|2Y{W&@Z(TdDL|pZ~)<;A3Xmmlq+82h00A*12ksERcC}&ahPuD-bTL_*T znwS3b<6X;>KAqoFKeyGA;)+Bu;lm12JKHF8@M< zgd_k)X1atm{t)b9F1{A$djWKgjVDoKV1&NJwS5Z5mEzZ$#JJywVGKXPGTWKKdTw-m z0!zawd_X&1CjvXP+Jon~1HoAIV@~IB-}uBh%?hGHLEV z^lNpUM;A@7iMYwf2Hr^%G!-xzTH>@Z%5a2>F>4}>x$3^rP2w_~%iba@OfmoLjqv$T zmc!K<(CF@>FGIz0hdMNTsO`eN_jgt>L3LqQ=cW(nVGMKt$xA#SqMMa4Mu_RH4`zIL zNJ()JSf8yzWuR(f<{rH8Ml^gd%KYLSbnTQnPVEdEJJTe2%S&U25%mv1VsOugGLY$- zJJklLSA%x#DoR4_w|2pg9`{4_yT<= zfNjIq8Bjmmu!U;X#k6fD9GPMjVUCcH7KCj%MR~@3!Y-FgBEMO91t2FU{8H?m%zc=2 z@1s<2*^=K0rgKoStZ2Y$Q|ln4m-?*tBLC^nni}Sq9D8~m!ig&Mzly+FAf}pxpfG*;iEz1?I)DQ=2rQtK zHpN#j8S;R*j)lzIU(EQMlW?hGwml9Jg?h#h^X%ZiSQaa{1%mb76Mey<@k_+LrsH$yW%y~ugv@=9F26rF1i$jW=#>UQ zmqqo~&Rk_duAUl4Xf=le$`$Z1HD{pbS^Me@%(@wxl!|hVXnB%W##yM+k5Zl zbbWdN6O<7v$@A&2@3+AI{$GqO98ez}{ur{0LVpkbjh`G6;=5RGdEl2Vtb8|N_0Bq@ zM&;;a6V5sYD2C`!@IyFKA1)Z!fS#SO4W1wx`SEZ|>;Cs12+&O2>3gjW4NT_LPBe%p zY%XHKd}IBTcvd|qeU^vxR~Ps;uvzmx8=yiJ7SnfqtB=WxQ^qK1ULIjf+@aC^2lA@f z7w1pm@Qf$_Dt4+EML9;w#NKJUBs?T?QiQbKV-W|h3ZDy>!Zta-n1ZZJ*K~wFI#a~8 zX;0fTa)-!amzml4R*FchvtZVeeB6Sbyq(SxY*!{ZS4 zM^81uJ3|nmj^A6IC~PLdG;UI#mINs@;=R(Dr;LRTC~@74$GQQf09)~eNh*C916|SXOBZ{?&-dr|+~s=B zNTcdB2F^JJ{%Zv4W(8odjAArng;6T&k9c&X7y#^i&-)oUOsGEfBdjysa(hopG*_-l zxe#rxH0X}r9bLKQP(}{8A9L=|E(olMtZbk{whwL>kQQ3gP};$3@A6EIk$*_ODAJO4 
zr;aVgB)J5adF9j}O#5*Kq%GCEp|+RFCZh-W*wA#uW#49<-P!u|J&GMh1Yac-;ydad z46Fk@SZ30NBHDYO{MEA7nh@-8z>Kt z+Fg-pi0V9G>auVObzeq|Rswyzdw=}-3{@eV6V;zgATQuD+NGyf1=`1@Xz}B7&%IhX z84O_D1Rj&pvA$<+7dzX1P?dL^=z^f!H_huEY1BI8))>LkfGwE{6b<-3^Rv8*m|*8c z`Ee6xHex@NC1dkvssaVR7VZ&<-d4VS0@>}C&UlA#Yk`?F1m{Fby1f|H?|{v~VqG!!%!4^_`9ejzwC`FZmt%1|zU>`vAlov$aVV(lhyET3N2*bgDR! zz^;L{gzL|d$Jxc+wNf-Y1#u2Mkjs=oML4s3Ui1Xd;bvoA8IMPB2L_#g!Tb2m&kHTJND_bQ)671<~*c(g#4-Ab_ZCIO!I zM`h2iP^+@!H^hqR5|2t0QAI}zOGQ9Z52{HZrMq`xG$D)@%3WUIe55AUQiMbs|9;ur2{@REonS58JxTiRI;C($-@-`Xq?Pq-p_-MRpC-?NKW(jg~x1$O44)mu``i!I{^J%(_TL@E+r` zYlGE+#(6ze^sFFE9*e0smu@znu+e@mMJ8aKsr477tEe|_U}`cR;GyYYDbO%}5AM@< zrKbu^5^T+pF9J6J5+B5nt&vOMa65v zAjjCE>bR=X2D566>0k!a*B1A}6t6P-CKiHw{H7TdI1_(4-t&#vw|ic=k$3Sze-j2K z{o91I(Gks))~~{u!`A^{uJC&SN8a1ORChgpCgB6zP|;yWHW<2myBCf@&4lt|ViPAW z6HZ6i1-KK;w2=`bEyA?ZaqcWkcqcziTM}at0QHepZ1eQ@cEJRt`fn-8x$ih+{_AIg z51{JUGZGMv}H&%27^X6bcI=~V!A<@;VxQA@gp z>jmsm6whTVV>50eq*B6^UoFuAKw!9(MS{=^pkJc-AA9*X61^=O3RAcJl1`5iXs@rjH~4%#>G)qlqZ{P=?_SuC@qfB|Tt zef0*Il+Ih)rmLV6Be8U%3*Q{nYTP~Gd1GCoy7{YmQ?$dF$(rFjTW=jS6faN|r&RGu zgi_+rtuT5~Jk7sB-6_~}oqxZ~na1l!*<5LUIH9~lg#NnBQQt0utuZ^SDkgPzh7;N`-Tg*^#x^Dkgo zBxWQ90V;kPm`a^42&BpsizPM?R<$lE=n~HgcW~?7ujz5nbl+n1zU;hCjRWEpaO~?u z+~1V7OiD@r{oMbj*PjJOTqdRgB`@zH4^E*hRw$Yg({Xi7ICg?IULHE6S?SM4Bk7Aq zNY7^bkR&EuMM^rLy~~wH`wuFWEZHct@i|WL36bP?J9DPGRosu{SyKZ9;izrno}j); z+L+{4gNS;_b;hJ`yYo27D_qb3KfUQ+2rFzzGC`Qt^FIL@3wKySXSMy7Gmqfv-RlOz zbeaq7_`}5wzZKwRDq!SYDXH5*P3i)s9&ahk(wBSw9r3%jeyzKI<+{N5Ab(y`JAC~; zzYdLpDhNKJ(!nHAyu+=B%3ewccLaTJ6*Rt$Lf;mx-tnPJaReQH#sgM2-9eUwA1FbiqfiZ^>ejv=)`&k4Ba8X)hT+NpD z^*5WSBdrlk$?8?vF%;%zQ?uR^c&4l?!x!y{4Jr1%Civ~^a>cC{CaoxPBuc0$as#ST zBHxnFDhJ3U1GLfG0(~{DG3%o&L7J^lM89O=IDLUMI9f9$39j=Q`p4i_YG}vf^ZE*c z!)x0{PPAJCF>f%g9?0Ef`CONTOXKaFGK2mWuxipc8m0!sjy^;cLiC|8kV|=+GqT&q zG(IC}(d-R{g49+|J1)4Xsggsqu2P<>DhnR&>}(&>qc}_l-W_l2Fr)CKzZZn+p0Yh& zE*xW%e`(@s3y#s3)QJ<69KhTEzy93sZM>z|`O?}gb09BnCzFuH#*enxZ zoaDy>4FF!b2<2XR-S`iO!WQ@8>;v;sQXzeP@0%ao3&U=g$w^KSS%4#R@kZn#xc<^{;m 
zB;dPK{<|f_ne_+Gy(1aJubFyhf<-5}MtN;Eww8-=5-70j$??h_A45_mdx+FDq^Q+h ze|uxUHj7p+mDVU-yY?~MZP#;&V;X%mfr}<|2LpLHV=1>P<8XSZn@wD@ebEX(3)vQ0 z)Qg~(CcCi&E6v%V*eu{7bdpz(tMd*oywrI7j(VHA!%iS+NX&@2+p zx>ahR9!ch}0|-w;<*8Yt!5zfHbx!qHaA0WaC$c`S_0G^=GCrz$(I-Bc4SCbB5pJP3 zc6O?mt&wW|%O(BKqv9N1z(d-PpUbV*kUHD zaWR0!2=7q1_H=Q4!M#Xm{~Z){<*=yEhIL-Z%KSxJpn&pAB1Sqag4nY88x8|C6?jcAY6f#J;79E$AqKCp+28W!hQ=2Cj94*X51~p0$RM?|LvttuhTol)R z1){bQ)1Dm{n`SLL$g@%Z5cxz5;UX&h$=Z^egZBGwfk_MQ z8RshJc^fIbXIBc~mj(C{{GQJ)MYKT>MY;_U%SKsD;R9uE5 z@#LNk-vU4iufnXtc_wf!PS*7HFA`87h zreEkNa@pz+D+#7`j$OMukVWp8xEQJT<~N|`9wY6R>7W}?|hbH6_~I86=nJot@A^=(g4$K>ddI@OFyQ*h&KGP=hf+! z#1QzEHa!3;>L=s#V9lLA*|#t71Bo{!A3|T038Wio?b@-_I*`Z_^PQXUPm}i)aZAQ= zy3gFLA%n)4k&lE3*R90LB;5zW@h)*sovIk4z7QX(Z#8#$0E_0lg>Z}DqXpCO_w=O* z&=<@*lI9>xzS(|IHXAc>ht7e{PY?{{eU#fFu;c9&RohyaMXY!Qp-vIAP#QU%mnGeT zCo&yH3n>13fAMvx>0VAP&7urGE*h?*5Uskv+FEw)fk#6AMs)Q1dT>I|@$&9PXB@tW z61k7QE|3m;S%mDW(*RuYTW&Q_@p#^KCR{NxRqN9uU!U&bYRXNZRC2wZDRLGjU`1BL zf<6~K+qBq)seb?YJfhrmVPF=fysaB>{Y-?nLCTLuEgy~5d ziV7K2R|fQoD))ndg?;oe(ilO9_R}SNg&I(pKN*I3Zd5N`$Bx!2gVG3nkhr#kJqzLC z64}hAUntSY8qK5`_CMAZWUn6Dml+$jeXb?>hAUXY~{W#90^7UmT z{9HG6rEK1f3}2Hj1;e5{bX#I5xo5RCD`e-fS8_A%ni)MZ;e$ozkAlqI?3W!E*$-}6 zV8hKBHl&b;W$V68Tt!dkuRHkA+Y2u-G;hJLDuL8vY$pkOI$NCL7$=ME^?iYd&a|}8 z14|l?2kjR3tP$}HT=F43R4P%7A`6B1HXk?0)7m(=9H?s-ImyVpE3aW@n_JjezXDjr z8Jgd2!;=8En~JI3dcGb2<5WI6Yamn#nj0sB-IUQNKjW5l+$Ov<-y}!F<@bPd#ov)6 zHyK++flz}ibHL!nEKel&LOVQ|nAeJS45m?ehCSZ_fDp#^I^iiM25TzsgGS|W@O8Hq z@=-A>D8lLG+gz*E{C5!QXQ>ox3qPvn*RhrM1y6GAP_}jDe&anscD{m8?IQBF7ql_C z#LSE4r%$O1!8eH4os_1R<^{MChZ)ctZjV7*Ml;-vCcn2cYlmQ z29Hz{hp`O=!F44WkgOsFdk!8Y1c35L^=@Nwonl6>eyvc5@{8jK z?bj*+lYW|St3R&%X{5ng9LXb;z-|Tk@Y{m&P;{Yny=3vLQajSeiAka2VA<1xT+lyk zqRg|8f4Ew2hcqrIzOCE%`EUB4=){Lqg@*oC$2&hirjTyIb~+?-uMatY84fE=Of!Km zjt)rqrq|*Tf(Lx5^bn2nssdH&JKppz+C|zk&zgA%0 zlIcc&xFn(b9I)4cfi2=WoSm#V>*PBuduFztm(cS1#jpC$pZOaYQ^8Y$ z(mAw7y>B_^6%q4xn!KL&StL(gNh4sw2nWm;MLnb`+zJEn)qt9+^S6pj{ z*SBA-@VT7CZv_$(SD=W35@At6$|Y|5!CPJ~5jr`xT0evz0dlTamW!DG`#5Orf+>rQ 
zmD;9Pu{stA!+R)5qrc~l3x6y5?J&J-QAOqR=uceJbtfhF`uKMCM0GpSo0+=%D38r= z*NlP7vwrAD+({bK{_bIIHk!Soh;CeK9*{591u%=izlcu~Jza8pl!q8SfCF0YRqnnf z1^`$d@mnnz3>fLY#(qL#_zhM9h7in{@Qv+3vU1)GPaozc0-vx&wOO)swXNhYt){r$W)fZ55a&4RX2 zNqYbSbfU@&l4e|b)C1<1f^BUqL{dzYty)zhsquEv3(E)($BNv9fkWT*1bDk8uHMB* zk6#4<+%GusTb`dBF5Ef)X53VD&Xf*#6Z~3p`F~qUIK`=%)q#WMe3BBX+wRY$s!Aa| zqa<_`kt57VhvqH4arkf)GPGkF!DW)hX>Mx&e{dt6 zVO-2EM4fkxX<0=yHGU3TmNo3y=q1|e?iC~yx0KbXiZrv2Ifsrf206mJVE#Tyg)4N# zEB*G4;w;QWvF{IAbq9<~_co;W4Rbq3tuqCTH?)(f|5VpIHbUb@+yKK{(Ow)T{9-U2 zw_d!o8+!|^S|pz`JE|FYf1d0Uw!p{Um3b_k05EVi&KICQGp@(cs|)*CsmBsll>O6; zqD|8`E^BTPZ!=}V-`Nzq81{K-gSZEjF|#E8Vn1dEg#uW~UbEuYvpq* z@zJ>^hrHg^_2{Ut5+`3p0gvda5C9?0dU9t76#f7N!u8h)IAc!Mw-#AH;M69*&S}$5 z(Zp*u!C21tYIBa~^bpS&G0pW_r~_pmSBluj4~8-2Z4K`p<^kn^lTEK3GNVKiq3JO< zN#wjbF~1@T7{i>}{R0d(9(VUMqqn^uoXGOjj56{X>omlCOPpl0q!d$UAuHjq_B0V( z!(m8d$CSMTkR(ypzu7jYH9c+Hwr$(CZQHhObK171ZQJJdy!zhnzZ<);RTWuzewCT` zo?BT_5$Cz*o309gS$Perh8uE+Tq1^3_e!?Y>um+IAJT^nN88UY1n4 z=1wCRdEP#6duu1pVBCRMF2w3s4zGs9_Hn-4Ia@GpK9MNUSRgAmT->{H`pCzWzV{jk zN?A35H_*An6~N}vA#RDbqzbv8vr(}Y9wJVVzjN&}=_ad?y}N9xr4LNEnEkf=Rnrn= zdULA#I2!*|oYsGM&cujq6z70SxZgzI)I;mK*3D#(a;m0}_zMso1tCGaYh3yn(kTl2 zT|`byn{9PhZB7Z}6nZg4>S~*O)4G)0r2p?6R^iIl~7fo7d@KH_)SFUZh?&NNp##&um3E) zbc0iEQD{{4(t95$!t|Z!DewT$hxrHYl3+v4^?qudEK*fJQV6cpCI%F=2>4@P)bk?= zEUB{U^sjq>I;+L?8lyTErA)*6q%z9R@it`4qstRXL-lTepbKN}6ZIL70QnUBOh$7* z%)t2984j*Tt|Y^I`0mL!FD#i@GF$6B1%k`Mpenz-oXORVkusP^&2jAs*)(K&O8DUy zEGvtOADA9ki^waqe~^X9qw7nZiXv@reh-s_N(Q?ehji|-a1gG|%14>Z6LfbKkkbyF zd!w4^`@){vUFXL`-ZIpU=W?LJ25SkZ`1eT2T&O)K!p>vz7duoqP*ry()Fb8oNsb98 zR|~^ITF%H?S|^!p;7iF~$gx6)aSDcrV$+ER=d}TNq^G@R*#4?Jo5)%LZBG!@(U1&} z!bGKfdkvn1y98SQ<02|pCd@wuEKcVg6${Hi?FbiziATTLWJAL4OGXwkBNrX3p9^yVw%6+Y- z1xuEQhkBJ*Na?6nmmu)MmCLODy8dV#(4a0SNK5^D9grBDoIdTc~@hULaMPk6jtbHYFNotaM zuy{@{;tF6<_c>kr0ckDQ6O&}-?Vpe($rc+Q(yKCn*5I9@pBe_ZC7gha=BwaNf+PU^ zHI2}j(WRFR#r~}*5o6$fhUt@y<1|9e5XQVsR|SAY4ydmy>H}oh}BD@+l>neO%qlq)G&Nf7)t|)~MMe+k6+Mu5%3jYO=5T>_Pg7m!P03 
z_Go_au^ov*)6`2yCpu22^NLY^eb7#7P-u?urYk*wbJuvc%#T>Qcu;^>s?2d?SK=eR zSAE(DGErVQxOkIuzP>&kSvC7Y(ct{yf~mT|=`v)Jb%G}TCh=e-vu<%d@-P__?Tyjt zvEkR_-r-{Q*n;i;&enDK!Y%d*=FmO;`5RF0Ek@NC`XJjm$GDD@$m>(#!-dA@@ie^}H7Y_-c>hoYagqR)rpY)~*?3Yn<4ovRa_Wi4; zR>-Y}PtPu7H9HA`p7R<+g@Le|Ph-BfGwrK%pS_rs&8?4^_&t`v>a_P7&X4?u0U_~`1KMYh3ZF!$fPsxRVP%9pG8N#ry}Cakw)A*4Z>Y_W9R zd?hZya}OLO5-o+ImgXoI28**3dN5h?+>sT!-AW0UQ+{Dhc#?CY?r`)#81=?1eObfC7qEHyWXC$ zf2?h{?}v=8;FeUmurLwY3aX5)iFH9UKcdE(eK{%YdbssQFJciXOH>tb*BQA#b7Y|$ zZLDJ9Vop4#RS39pJ51>%oQxsWbGS}^;c#`j1;lEfyLHcKqL`lZwjh9%@k(53Wh`f za0Cm($RW40Ns?q{Y?VYiwoir?=jg8>w=HKLu2}pGTTg?@@PkOO3UqqOpOlG0<%g{s za<93nzq#)ci>5QCB2Z4<)Yr+a_&w8UiR{fegGaHV98=~vEl zyK{<7_FTUbM^q)|Mmg%b5zI^c+}M^0FMus#EXz+N0LVPOA8?~=Wnn@@FKuB$E1Cct zlC4irM%IQ7j`l`+R)6ng+hySCGmU<=*xD53F z>@7dPjVrDO6$>ja6&ni^E<9C+@^lU|q%uGxjaasPF3XVor%D7D5*H`_g48_Fsea>H56qkwN?-;oL z!!KO=f1Lhz9EHuk8)0VrJ6iwkd=%;W9J*<0zVc+Vr^J{r!8^zib9v zW|sf9ahaHz*}gqBvvf4F|9-dBb2Jh(`u;YIpuPw5UpJ2{;Dv&TqQY6T5^uS`;;bz5 zZvhCd?|*t+C2!mzox%|vpgA~M|2P*D~~WRMdGzVO75Y=_RHY>uIh zuPs*&pGTIZb%#ltY7}fZBnCt!lSU+d=%~K1&5gVc@}0AmOkTsXi1qC5mP(w{KAJ-hZL*h_>R4g*R< zs1SzBpO+-M0e}I90#8At@Jt2MVxpeFFZ&DimW*xZbZ^->mpnNN7jOc93bFXR;Q`Pw zX<0B`RPg(NJZgwb!Ct_Fgo%y;;M>Q?0|+YfL&YY=g7SZA4S@u|&kvP#bXy+hYtHoO zIA?i%*R+59AaL{jV#EgA-U^DGVI{-_y#8Y7^|@{IsIs7tu<{6`5vB z%#_FgbQUIvCpoh@l5++U8k$o*Rb%75G_etD3y<8lh7tQ(5Xj5+MF1v+jCm0$*an0^ zW(E*E*at+~TY?lUDh9~63vk;Q!Q*?)b3sKM&c7#p02m2R4Z^C0IhMmGPzXTx6D&aJ zjllZ3htmhuXXnRU2`g&EqXYlm`e%^%d)Vjd=2k81bK(U^|0fvX0RSE07-$&(E|7PC zF7E_%*zL_HEUz>mjtu4xhjx9R2b2^pra(VEL{gbR=u4Yxvk;wY9`f1q>oV! 
zuCIc5vT+PI_Tz3h);;d2v29zPV8~r zQ`95#bp!~X5UdBwq`#aX(6THE_Rn+5q` zv>*Ol^G|$IZb0qdj55Ge5Z8u$NZPu?B$`!Lzhp+tXWH8GWbO8bxA?&u@_ijhV2D5_ zHc-qD-BoOOgCybkN%XOFd7-;C7FPAVb=tS|Sp`Ja=qcTE%IRprpf^-j4!GrqqW%$~ zN;3abF3?}qzP_nJ?qvb5)|C7>&#}c<$n6r90+l1=Ee=jaqtkPYgiS?JaX$iY?r0cM7TRL9?YSz^p|B=JHuuUX18Z1T(YxVwIbV!cZ{SlYFQGs>?ex z3bffAlYTVzD$ILnmSi#=I2r0za=i|nJj>)PgzJGFEP^HOJjCou<|9U>o*BauF^0RA zU><7wh>Daxj&Q;@CcROFDy<9+{CL{swy|<;=Yh<2kHUtwK5d9@??RZjN0nbxwjUMN z0Y2TBA>tqkDBKU2r?4*1e;1`D3UC060Smk z8(?PQ`yy)35)3t|hCVGmUru>T^8-20k+*h-)h|?DFN{+UbrmoXi<*#VDr)LU%VB5@ z2Q`7C#!gY@8d2N?D0E!}O2BaD(mCmwiWR%yKMfmN1K}r1a~4 z@>Ps}3nN~qWLJvk+}k)lLX@>66#l?&v9-ew6ESLqH8a{s`Sl#iIANHA3U7?g%#q^G z{kG!bo!V(OQLToG?~W3Hg!=gPL*+R?zNdm^6Pp z18U3jAI+w$wOAJHdJIaO#0I&u_I*>C2-+O~EHrqSs}h(tf8^KD&8{hTfj1gAn~((# ziR1XO&6(=y-DbuJw>`7mo=B!`C7(ZJY-?{`3}?mLKfpe%*sxAH9zu_)6sA3fZaoi=x~pH2S)t;AJbjc5ABY_jgQ$H? z(aOOhdvDf3-!k$b=oBj9&s4h$>4?hSQi4t|%6-1)?CsY^imE$49J4N20i}u{sm_E) zV}PxG>>Yp1so>nAn8#)0TC$4VEN{F`7|};N7f8#By}^BQk*5VQOi6^mFLA{vcqb0X zj$fO@t#|LYvS=$j9n0#ww1k3J%7{R1&+dpqIf{;ia6X zYCBCMt?WK)wR3nL%tL=3uBNpm{2`)K+MU9mc;bF=E5Ovs!vX>HiQh3nljvU0kgWX6 z4V5sOpvkTlttQGGrj2|=QQYOCQZN+X4y>1&`!{4v#vH(PTV-PlT_5~WjDGDb$HA0dPaexyYM1d|msCBO zq8o%Ikf-2 zp(r#Kg~3VJe7BeQp$3l!YX<4e*ud&-dI&x1MzKqv3LYc?4enML)arV1(_+dJ<9p(o*Y`3z8 zD?$Tf++^dnxE;yy5Z;d=`<*;R7M9W7A+*w!D+h zI^s~}acw~zNzhIc;LDyi3ws4e4h&1T$m?WnE5!-FGm}Qm!0r zodz>J6cmu_gPN|$apGR|A;t&-kV_akYCS~Q&X;0pJ%*flBllJp2#a?Z3{_O z(=l{CF6S17x&iP<6P=CkBkQ*0du0|(#Q)3~>DP-Zwo@*x?9$g^zPYA%5ZC@>s(6?y zp+h4s^a{lSEK0+Q$0Gvut4on zc$E&FH2s?2tC105ox)NS8LlU6AF2rLl2fz`9$?`1>7n*ms4beyiNw8&QHqV9Embe_%D%b(M-Oi-!gu%_n#EjE(C68nhm(x!;egn~i-jWjzKOES))5W7C^Y?P z(shCUV{!1&h~;b`W{8;hE+GGjFCpSi+rbV|o}3bBg5+2X(&0SmWo8 zwGoXeMUF_|t2V{ZpD>-Q%WAcFZDiY7wmzkV7ds7ER28Y*&2r7i)UNrSo$O3*894R1 zKgO5$;xZ%YQ=~t9JG(qa2$~fb>I+0ofA31L^Rhr*JcT;c>oZMjWsj~t=c!htI7pYu z=!c^GLh`mx10NtK_=6yB>up1(JE|j|bOt!2$=j8q_~1^`te+>4UAL~Qmn}af9o+(o z(!&j*$7~~Onu!@8re3&j(&@B3CRr%X&%o7D^@ctCs1}ucb&z;5HsKKOc`j$vpIly+ 
znA7xIH1GE3d#hrM46X^W;}XgT|M^1+K$E*_%_Q3OQae;^N$+GJ;vt)mz@y6o-^WA?i z*!wlL36(k7)jTErCKtPcv^u8q`4qp}j^&ky`z}~aB9ICzzK$=vs5%}(HR*cRuWKWV z=kXQ6K_0I{uC^tAMs_dKNK!3gWcSL8V~<>~dRm>6 zd+31{1T`^n3=@VacU4a~vdf;5dzSH z2^3BYzMLqkQ0;}K(9{QJ>NeD$Qo5WoH)@uUTE*w`2p&PzNPyh+UE?&&3HgKcVKq#| zy{@`;IvRy4!90x)%$GvWPCun02U$8%16Q%jL3p%J^i&?CzMW#q&1T@xrSS(VenPt* zPESH70U0mbOKU4x+qt*oNe~GR!sU+x#AkmipC}zvQ|o~#424xaqR=v6peOJ)kV)9Pp+UB zUskCCxWs)VrYqh`M~Yp?Nv*cdk|zP*PR4JAnRSGGr>-qhxy@&LrS&V!3hNLBB+dN+ zUhV>1Y*BvJWiWupLkQ8Dy|X;N1POB`>j6g<=Ap3dYUs%Ma^lx^kY$7-RXp-+M*Jnr z^&`hSNEGK!B9+}RYBH&$Cn=hU@unavGQVuzh&2jJ@D5P{`1@l@bK;%jlU<-^IgCt0Tl09JOao2n=|wlNcA;Wk%!oy$t+QcdU? zA#SHKE>3b4m&$cqWrI zu2iDMZh^*yD}jA^StQa*ZlG5NV%e^BEsO~7NKINrsHzo!63({0&uB>pe$gZkePZr` zof!YSJ{KJAhUR-Q`Z#>Ea!Kou9R|dqG6`p^t$5kBMikgLKH=v)=0jb;m1Y&X#v7Os z&MH_Dq@g&FCH{U$2~djtzIbuIQ&LR2-kOPi%PB2<(|mh1v4{_hj?4Y_rI$0o9TLO) zle*B6dcQq(_IhMVEWi1;ARLV4=NFjZNBPFc*d{Fd zXC)?|h=H1HcsS#zN2;qO;-dOKI?$;X zOG8)*cp+<2nn_S(rAjYUhHnA=BL`3qm^!B9gvBu6iHg4py@TOqyN*3^aDZae3J&w1 z4Ku4u&|UeattRwh-t$7;Sk49L#pxng+V0rM;; z?4<^+{B_Si&=|m-6N(2iIn!=bt`A&1Sj1azJqOBpW?3redE5M9cZYQDAH30s{n-P3 zL>hQ4PIaZQSsu!u!ByRoK!f`Vad0Z5cVqq-@oSqPW0#kOaQ$k(!q`kOd!)9#{wOa* z>M}hP7y6CuPaSsU)GcdQ&&GILU5filj#3Oq#wQuwH77o-^MAw9e?>-CCf5Ime`C^r#s6jv{~g@^|G^q0C{+K_g70uP)juQj z|0i{TVrKnk1pI#(!~Yv~U}O1j=s@=!>HiOOV4`P&`U_N;{=r55?)~?ZiHZ4tBZ2jR{}2g!T$aE5=|46DF3Vr?^dB3SfsUDt>Hiy1ShfSl(oQB*tu|Y& zEiQd;{{3?CvFh@fVsqHaa-$)~FQ9k0P~uqHR6|KqD-669Zv+~04kpfbZk}q4bcE3a zIpseDi=HBmZu~I?wumYW66q)@87Z3yGK4BT1rkhbjM^-F{}q(XR7;1Kt0Jd>ESFI5 zxg|VAT>K?0ww%6-_&&V6JowoSsVRk-MR{%sN%}b`Ew*i{uWM;yalv;@BcR4%0!@Gf z1ck@p0WSj0;9vqIEG-QN1WW=1tO10k;ejS#0Zrn7WB|m%AwmHp01w0mZ9;P7!)ggZ zXc=`30a957`Dt`?4_}uMR!|W}hqdq}Q7olbC?(c2(961nd_X6xZe##^g80oF!eQgVSZ zGHde7MwpL}!v%;&U0IqKNF9~f8Aw>z85KyF*cdG5%gknf_t;eb?7hbD%Y;()t1LtP zoggYKtRoC@VW>Q7Hb-~-z7poh{l0V7Os{I?S3HmG6!S95Vt=VQ8(NTY-k>TJC0jv@ zLDt2`eJlWB=|Z-at}K=6GzR|O{!aXLev=$^a%U>^iF6;B`uC~{YCi19G;8VQDf~A- zepTi2*2Ch;*k57k&5AS00=xu2_K`10h2=uPZGEA78T% 
zh4)NKug_Kzjw6=VXl%!+x(U>R+tpFwlWl;#G?k@x@IXiZ7dF1T^4hLF{#;hAE(!*q zV^N}*r_l3-$pG0Zm za~xEL#^0&_25dvW!J<-ptdj_zB6aosE#oJ5&zLebfC68Ut=kS$>K~%EX3DZO2q8|c zJ{~}3Lej>)Y6np}9|0@~PUYm(2HceENrvu%8;(W|O3n|s6)Y*&e3*M~0$>pz5a`Vb zsi`!qB*j^Iai0d06G%-JScailh>%~Qq^LuHv?fzb+}jjx#?9W=ke4>6vLVu1mVcFN z!x35IqO^7Q6_#Dq6KN_iZR^<;BvA(ej$B#wWWzrPpAu_Knf6H&qYxL+ypQa-)n+z_hxkK8z?x97?&5mu(S44(|vU_RzY9Uer0lfZG zT-&A8%P@Pz(RGjdT_;CJ$xt_qMq@$Ecei<#P4?6QR+w9pvXH1)6SgCz)TgnN$gJSC zXG+2VWE=plbpjz^==OuiXo6Aa=j|Era9|?}ftC8$`d(C*dqI36QW#5F%&WGdb9oy|3NjCdg zu^5kjhAa;!HVz|`u>`#JmVT@Glr|ipaw2a`s)g@pq7A^8T7DJYy4~86T~}Kgk!7%% zOH%hQ$h95YBoejvrKLcI6Bi>YzxDt>v&{6}z-G-?G*oH0d$NRe=cc>4UFsS+z)oVY zGq)F2lW64gU^S5+CvTg%Dn%yQ&K_Y1M4{H|P3IY|ZRc76@u~d`wO@|fdf~a5|C9HmV;88wmgX`9!xAi zm~_aZ)7G^sTo^C6*G!MP+aUDLW!)evdWwJjXFBK0b0ssnMl@J_@{}bU1`f2(Pr~h5 zT9-ug9CFjQse#e&btPBt4|MmNG_x&9sxXu68WFP)Er~tR-LBvWTp#iBDcH~BW~krX z#WI#4bJCdS%iNb#Bs;Sg(2@!;6N@ zV~~+<+kG+hFuN#3epu6e+iwVh`G!_?6~tGCl(OFBZ}A>Y(C4h42k_=RfQaH zKI@W9`K*staI+FPW`_rA|-VG0+Q7{NzeZt?S9v@SfsitF7rH2i(;0NXkzuaYnJK{qU}<-)KZ z(EuayEm$0l8ZnKVJTFyb;f)c&PnaT~!qQ{F`i!#3QFAciI=!HRw9f!4_V%HTROSMmoq@XvBqI7hl62tsuu(>gs8U+1=u z;;tAC8Ra6CTBlnQG}C6ivz!K41l6-od_NKY)qZ677^8!P;d4d5T&*iJoG-ANnv#h?(q~2eLKyu*bPGN-4 zduAP%P)4#7L(~IJmq7d18mUQj&i_m*UN)i?711-2)J(&XoBq zh^nd>ZRN)YO%fHoIvADfTlPumhH*}NnU*%$4s*$5d%VxZLAqvs4$d5kZ4(VBDWj<^&`#@2Nayg|?|A=}; z+9Z{h(YtVYBIN7#nCS)5aUG)Ws45Ag$)cu6d+_pJB;Aze)b0TdNnCS_4ZOa;YO|=N zbcOUJ-~QNKm+lHmO&OqOX|&N(dSM1zqyW)pmiKz^N1%T8FA??wqjyMs<%loie#1~6 zF?PtR^Cv@(#WN8p3P4fC0?*0xN@(c7%DT3nwRn(q=$wzk}-$4wE5vbUMLfQgae zE}MP=@3)GnA=zRY?|GwBPGUy1RJSIBbBBKik7Qo)|a-r;*G0)OOT6tDGkoh-|im zt9fVzdGi{St0+yEZcOhkauQjx3}I9n-ISHbc8unz4}Rd{ti z@tnvonJ7BMP|xTajEc6@!lU6KL{iAdEA0C0jUEb8NtTgHUw|}7^IUIIFBWhc5US+dXX+8|tRr~ZfMhB% zGvE+POvnl)uKLWvlWjpZw9%z^&t97L7U;@@3(}?^FFFs?!uGD2CyF&d7G9YRZ=tV- z0YzX!L3Dvp4pd8rQ1;L2QOZY)ANy^)b*{CqTb zu|r@Xp+`_%1;yJ+VI+YdIzYKE<%canE`NCSH{ajKLxB~EcB>_jg#KxO1fDCJ7!%b= z;RfTG!q$aRdsDXhl(wU|(IrwnufaPB>UIx+zr^vQizjb@3Msgz?kGJ#B?F7gi_=Bk 
zi^%;)%*1^m&Op_d^i#S>_x!ob#0Pq-Cg_ypi-8T}g-#|;+To(S+i|CrlczVy1Cw=c zwp5qGTb^2Y{WPSLbi(i%&_mEBkE?(|Oe8N8H2c8czZ=Uu8kQmfp48(Xwj%0Ag3((&C z2+F{dYd|Gth*a(yf*=@6GJY-x>6{1%Xz~PT`Wa83+TnSyPuvnwucEW4#|$uhPfLIO zU>;4&%jSx|zcA28TMD4LUnt6C1jCJ^lF1W}9x^4P^MrX5e%^LP+e}U-Adc$5aOK7v zVpSbWHd*2}T{SC;_p?e?+iSe0g_^o30C^-^1f%=_xNP6yFhO&tElWHuBHzUtvXSNw zrzQxsWD{l%4j3!L`bo51-M4-#7<>MJUt&*1z|f$=fk2DUE9P>@ex}*#rk2*zLjy#Q zYk9}-NSYWs6#7TwSt(|Q23%EwKDiHYf|#rthAFQA(2* zoslJFrds4sI#C(gaw>SU8xj`gZe>H5kj)ro{b_QD6+z57X(2N3A~d2aKSbGGfYo&r znG`C1M})VPdFYWp?f0xAGwB|Bq$B(TyDz+*K|irMcn$pdju9k>b^2h6s4=&5hb`-P zhNExfq9sLlz!1=Y5o}IlWmTQ&J#h|fDu%_lad7l0v7C=k+jw`hYu5zI&D-R)yEy?C zdofLJ-h{9OMGL=Vk)fW=jF?V6n`1_*cjOj~DCV|W@u@5Z?!k5zQ@nDz<>a?|ecT~u zIVf=ruJ$1l=$iVnTj?5w?sWwEUa3X0kk!QDDO#CNivBS%0HufFSF?)~dd2^%X!o~( zN68>qH;DW5$u8|S)vload?OOQ`7)iIQWTf@Z}sCfMGFB>^o`hr0khGIe0#*1VMz$5 z51@Bq>4j2yu(n6ugHuEQBp!s%UjrP5doV~N2|a|g9aJcaykL>SsyG{*`4W7gYsGlR z&bG;2f|WNpmk#>cG%QV=D2^oM*@RMjEPJddZ)OAx0yaeq*~=pIpN_*%ay)Ml(GHr5 zs3_#Ha;jZ^L@mBgsab1kyfFL1y~T@W*t~0V4l*pOr}x$bT06ADLkLW*@R1dKZnft? 
zCDc^t1-pg?3kUnaafdR|4I%y@zcJ!5D>?IKwfJyNPRbK0@VDLlhz#}CaZ&YHe@F$c z{i4is3j-Bbf`1T(^`<@^@~vHl-r?@12=IBixt(+lD{))}t97j<8AWfTZu5c~;&wc# zk7#vF84x!^ubpJ=--pUW1(KwMn6EL;62fm==}g&sYOHI^HU1b$^2oo@QK5a3VnM5N z73QJvjuH1}JE=SKuWOhImM?6=C1lb-hYvwZ5m#XqMy$!|sO5c+KGk2{Z8n&5$B3uY zPAA^wpa_kMH1{9K{tfrAND{8Ql?}cakJ)fumDb zsDZ>zCON?cO;JWp1U@jGxjw2#(0K0q{TmxT2H~6OZVS_|S8IbyX9DO-Ipc+k$Yp zbLHR`q8LEd$bBQ8N5X;-2&)L!=YU6J2s@D*U{sJ(Xe=Nqfk|N7X3lpor5RE;e~Y)u z&;l=MLp?59opB~}3e3({b9k&7FX2qeS#P&qOLv@0kw;-W1_mezxA|V7e!PTk1xSn@ zH80BCA9=qw2*Vozl$G8CY3sdIiliq|)s=|Kx^|PE_EX zyJUtcWjuD7TrEZ<68E$Kuy9jbGk{7F#({V6lGN=K1I?jcQ)<3!>u0IE35}Q6B3Ht* zhtQb?gt8Hg?qJzlSST>TPymc6b2f+$tWM48_CZ2`4GDHw$ZYK9x&+_+N{;XSqqNkz zTZZ5o*(2Z#a?7q)W^_p zx|jP-as63|05|iq8%efb=bB00cPp586MFOfyGsuKz?e<_0y^GO23zPR856{(&TMbn ze4;$3W%770YoI2U-Q%o!o;*LBaX@@2d8y>!il8AGHuuDSQg~tke^CuQ#$G)vO<KO-S$QwL$&R`BO`EYI{+^H6Zze;A$iuQ?iap?o;uj5Yr8Uw9;$b%1% zK8^ZFjja~5B;jUpHoe*WlRL?uHW|`=B(%Kwu3O4#9#3Qo_JuI~Uf@a0&?ESgvhncC zbjAQp^p5#zPY}%u2cmJ0*btAZ6WgHd7!vJS7{JL9Er2z1mCR4TQ;HgRWsX?xQ!)aH zsbW`?JM#E9lM}bBx*iG{#iqjg$w zP%uW`*ieQS0B)Mgh&Y6CYY@9a7mXi!?U2VedM#Wkxyvb(yD2*DfG*n5%uMb55py;Q z-9H!*p8%-MnkcwCqt+!N>m^td9Ftfkf#t&G>$riB4sh+ymZ$4cMlIo6F7Rk42%*V0 zl4zT>lEGnn8N}zg*xgoJjXeS*!_nAOQb2(I#L}riZ!;z?@Pg%csaRI^4pws8i8T*r zF>Ptw$0S**qM{~0Tv-Q-ei!XNq?uq8&Mgc&XJ$qI)J~b+9~fD<5#iKhjRRMRbY_@z ztQ<1_B64xEGFEzD)uEtZZZ7vJMh7`)zpGFv-E#rXzrIXqVm<31I}TFVeMzLB-bPO0 zNj?SbtuDsMs)+xw$5Xr-AWbp(eeUE zQ%|pZbdBJfn>*v1XDnu{>t*8wd5}(s5OB|B8)|7>YYuUggxg;dQk42$jro_^_X~82 zB`$kSwynt5pd_~(Q_`hHm9C4(2Jbt74woeymzfM1V!}zf7!K<;=hjtZ$9yNeu~ey# zNa@-113F4UTq{n*#zv-Ttq>-RBUmb#DWFWLK3$2Z>$pkzV8b1A0u6AB>1k!vaO9VvRy+|ZBPvjkOyRgO#z}Yi&JcLMjqWV=E#Ma-`BRs|V>T-rAaoF`K|Id#!61s;$y1_jLtNnRtgS+E|B%z>-DT ze(jZ?tEPyi?p`m;i)vU}Czi>G&1-4k!Dq6oMFx*fcvi^Tu#yiIq_#AsM-`Y-ES=?= zxa(uZQeWK67Ti$0yQ6a)#xlF8kbat)BKis0QJmZ4i^A?{?=!iC829nzD#isFKX)Jq zj*nTUgj}&joy=fM6vN|Db68cd*5uv-8Ep+`DbvwNG$iFlQcD+XytIDFG@)<;&IDj` zL$Y*VezFRIVDN;Q>~_!h5^~f1>1}Hd9|Vb_-`gXPC*+C!MC^xUsdmfAfPsvEc6I*o 
zlb+5gt`A82?LijK^S1)N(t$KqpHSX~zdNC1i-~3?lqEBQ{!V5U@`0}YYeQ(4hFv+M=^p=t80e5#@D;!7|e@SX| zZ0dOpR~@z=Lh_*)w_>gF8kSgR*D1h=o+1~&IIz8jJoRuY=@ccq4_%0 zH-Z&NS-DY{^#iZ-s||=ny&Rt7TI3fI<k1yk2R3VR8V=iC~ zq+GDUWbIp;Ezd)#BS6Z)vq&N_TI^orE-~5z>DIIil+bEvR$Bd7kfXCn@_{A}`4>j8gq7rYk}5XhP?N3%KgVFY!Jx->B`!t5 zxK3CLe?2mhP4O*{feV=94)0HSD@;-p{}T2Z$KTvD3&b&TtMgnTbxbVGf*29eUeL6r z0C7yQt*%CLkr)qx^E({!)CXqWF?}O%;S}Xp&rl?XxG59bB=gpc#|bse^n@81-@4j3 zL6f}v1w}x7wL%;q!QN~suS&E?Gt(|%DZUb4MZouj#S$6wa=bHzSz|C=_kQqF-Z_hx zFVOuvKhU)S!>;tKbaFCp+J<_xOF#R*=9)YwyOfSZEn+$j3c-?}>F)r`Qymdo+!uZ=SNvhF> z;B;4qEliYi;&f0dNReS^JYw|ha2IG)bI!25WU4!MxsEpcJT1u`{$iMkBkVX+^YKpp zG7NNa$fJI>0r<@*r{NH^Y6yxIqG+NNlWMi4$Or+bTTl!@bshQ)n#M*SD@UG`ozRxg&K&ct9m)}PaOZAR!5FD>CoM$F8@e;!8*Eo|Rdr{ZgvQk* zSkirZ)jvWi7>jUhLP>bd93b_@ zOHM>Vbny@__2ti+oxA{p4S_SGNk&JZ*3`|jQWP0dL~qD|4FP2)TQJ;w(ARoQ3v*z; zIyHMK0g%}7n#VUx{JJi6598Euf4~}oXyquLU>#o!svD$u*>rvard-o7i%37mSG{k2 zj1YxYd-(a;5GS`LW$7hr2O&LsR2JY=B6zxq)_1Lbk-xmmw6Nmd@L+8jd$B4p ze~#(k!~;Noc0oZs23_xCt(u5xH6E;U13_=ydK+=ZnD%%iGEl}M+1tOsd4v$vgJf$+ z3rAZbnQsy6cy;!PoYawgF8tvzkicZ!ZjT}#va-*1j-OfON!eZo7cH}C1LvO1#(PYI z^W1HcMzY=kN+m^qeB=u}}W|;r{Mp zEN?e>2b%;mVZ2b&saeucpT6J5oBj8OtsXDKegAcZ!MPXwC=%6wIjmdEVmLm}Hp~?JT1Pgfb>M0#utRXuM8Iye?Hvi&4T2!tq4m4ehyw0ILMj z$>)R+(_5cz9g(__atJW~WlV#44y?<6VBcRG$8v7CnQK1g>9!d1mctpEaKv$~fdW!1 zyK6I9YK2~SwXHYFL)wT76A{#e!m+5v)ghvnm7J!pPDivf2S;|Z3z9n0%%4ubehS)v z2~Gqx|FSN+H{W-IR@LI8cV)t()V6Ym-eC`Y@w(B9oE@Z2_pLN%J%c&FO*%Tq=UB$f zZ39>pv4K0{OID+L!$5Ji4hIh_Il$D4w@7KgbOv;#%4d-K3%i^11&4il`VXG@U^wAP z!_~c=Xcy3pVB`sI1RLt_A|C;5`&2DiUs6~sLe(xUN0nEjR8R*STjTCqoj*XKc#-MK z;W{NcXKa9O$RA*`Y0MJZj~{z<(+jnvr^Pdm@v73Pc6P>AD^)2_YM5CffIiT?A-|m( z%hch~hE?%CPDSqqTkRx=CgYtLjEQB9iT%v)u_ycE7}Cy%M$;(xSjwX?-h;%!x(*sS zUo(^M+n0BEMAL&8mcPH|4i8KcHAt zm@GNfirhDAz)P@hsS^PfxIm_n+`jhN@7|Qc!GRAn|mp#6-R03s{ zN*Z}(p9Cf{xoM7mbdKPXpF1O{9>=K~D6lol6FEc?(&*q`IN4PK!`9)*{BHIwU*XXK zdIwQEkahXb*s;`Nh106~Mvxhiw`&(CMV0F4xJAPEf`dvVV?gCQtGa;{H*yX_x3gKW z2Cz2Ivh#j=={&8%QW5bYsN)JT{^ydt3-SZD1yg>;^o0|j)eX+L82sS;sKbY)>ZyZO 
zr{snx@h5lN!MXC+q{bG6;imAj4z00=+EN$?5@wfpRyqCWHQciMz(P(S^+({0uMFMs zh0BDFNWFs?Nx+3GR@S_nEvF*CfZZA%B)+ICa+z)+xH>(-ERggHZ05XVaslWARsp!S zm$4Nlv}XQ1$B%MO&4nBmuubXfd?qf;sE-YMt|^|bo13z$RK|`#5Ut=rBS803*rtND z|D>j=ii6wTA0lT=MaP+mo#VU>wBjrT1(e?n(VFlHB8 ztqA3B$!x=a&?+^{gh#P1e9R)ejTsxLTx>=(YF+*|_DScB0@S%j3P15!ODpYQrq$X zg4I}ch+bU%TjruJp;vLm9z;U#yJdH)l%C^G!{mTJ?mZDZzLthAyb1IYBHt9ybBW0?g+c@7y>mMu zfO@`=AuLL%jSgpmLSVtkn$*#IkV$bpQ8U{n`ka=!H*^7qUFk13>x}^>;+w~fJOvn> zDe`4gGLnwvU+8D}g1_#QK_TDLocG8m7Q_iRS~mbm+eH8*t&DJvd$C~{jMU&L%y`~( zJIlc|=Omn9^03l-dI_X8R4mlmibc`# zXU-B3Hk&@X;~DujK=Ku2kf_Gfzsxun3;Ir_!XfrmL zZ@2q7rUrX`5VEpGlf6tbAd+vFQDd+KHx(Viv-8T}qr>KfcU6_gRuSj2f}_9dw4V){q#Q z%%Twza-2&pGACST-ZDbDj)lHUb22oR7s~{CRgsOP+scF~0K%my@}N}m$@TT~b)%W= z5MoE+%nm(2(?O~In>;(J@&dvzq1JFFFosYu6$W8XDMxd_m1!41+ufONws3Mj{*Lca z|1bvjbnlP6AhlxhMuihLvJGcy=Si=s@fD8xa^{So^~Yz{@DrR zj<73=B8z4t^nTiqA}C7cr9gW{7Ezs%z0a!CdG+_SOZNj-8?B8$H=;s7z`-8P4?)#l zO?BGPw`CoTmhgS?{3Gdx+S@5Z#qGH|9_3;}5HTZnlf%-aMGGOvLHYT8e&&n|OgNz{fZ2oo z`iOHsd0}rZ;EmU`y*YhRZ>X~NpL6C5^I z3+ax2zbLmi8FB+#l2=Dyl3M522>eqIlyN%EEN#~cNd95#h#tuSniE>M#|;S~f))3< zBP6QI?$;bqWcDgC(o&TZLLqaRm4^o6)n%;up5*gud0H&E7O!xs=}{1L+VF5->U{`1(u;J#;G~Yug!{3)ZV) zj_1F?l@M3$;+|l!(Md()mN#5I4ZiFxif{$vL~sfimf9wkyW*Up&rd^La_f$Q0+}eV zOviwQ8hIZ6R@x3taJ#00mwKEvHiIEH|0F>|RP z0hB4)anO>X{9J6N?rY`|@N(rW!?&{my3iGS1(xqI1(!{6*+v8aI(0Lk^MJg_VNBRDiWziG8$uPSWq&`XHVoxK9i!Ju$cR4qw@r)&^@-P1)3(e~HHVhs4V zHNp27X7UAM26USxshup%lP`+F7E(z`n+u4jYssyP4NSD|r0K}I(~f|IGzz!-pzjHb zs=*penu(Qp&LseN)^>I)Fpnn1P6Mo38 z4z4G8<4EUJiS3gtPymQa>kx1ea(+;zsyp!1|cHSNf8&bH4@ONnRRjY zS{v8e=`=Bjf?}xOy~Nl{IcDm@WV7t(@==7eu36lIF{?Xy(FavKE%u#p#CQ~xOy#F9 z1K~yaBl$x_4-FPXdJm)DMf7{L-9YyAlf!u}sE85Pj{%Q}m=h^o*G~Hst^pgM*f8y8 zka<#qmJE=>RXcw`*`iQ{i0U{rB9cg^rkgEy4Ctr*+2T}}yeHy)TTuO}>6m;Ct2q(N zRk%vg6k0h(!;D$tpbadxC{Tt?Uef#&*zgl7D*C@fe~0I~G7*Aj0`vvk-)i>O_z)Ru z)!=s)aM%%#{_vxhIHuKU^5SH-x!Qdhx+E}hw`m5jwX)>Z6qG3sny=m2evZ#U3St=> z`OW;?f^fO7uP_h+TM$&sSOUas{S=IcSx+SMU{oi0d@NyPCdxpJE*l}|TGE;dV 
zGQY>pS}|P>#N>@avFzSBQzBNQD2lF{cQTTsjg}Ax)wtatNwA6Bp9u&IsBG_Vu(uLM z#IB^mW~<0)EJI+({t7&ATe=eGv8#t%=#G zQlEQpqUR4twFEf#`Is)=BAs&4D779wowwMMx;Cu?JYxn?^RMS({Ap9ii1 z173Kd*)y6-=sj-zL8eYg@Nf?piTykb^+rZlY)9P@4|cw5n@w)$)k*LZz3wfP9y;P& zAAtAM)ZXr>Gi!DTNi_Q*HyTqr2=G->C>md5S{o=0s1sIP zfZ>!l31xQ3ptt~?Iy6CHIiEtw*;WT2**7vriFf)Bap{D?1Y(bC zQ0<>YQsQxsMC7)og~-&~{of;qFW^!rr|S`nI+!A`<7k%zV>T>1c)1LJq?!vSe}q{o z)Ha75VFoC+D|*@bh@e3=wwxa>Ri76wbqckm=m`{e6HHn@(lcv6=|k>@1zdEW{g8c; zvsgq(4oC3Z2Hey-w`}}b%KTMYooz#GcZy??uUu^5E&1gTCX^O6f&Xmw{<2$B0;A5|Ok?^ui5pP&z_2m2I6ERf7>c!0eAo^TLl)?9Y zwLzwFs=d$JvEzaaL}M_nGGf3qan;xBvjtvSq&-#<-&cm(P+EM=|IY<4IkNlyY1hU@ z*dL&nvv5b*eQ~lTwef9`8(HWu_M8nggFw`>6Y~-)K=t~&Fpd{(7GJSjo(aoFU|^01 zdKPd8H!h+{DSrZjw+zy`k#$XqR0+V*p0hKc7 zxL!X#H)gKyH#=d&@7pTUjCxGSTNR_MMDoBo8(ANdzcRDZ_t1)z&5^S_M?BbU(sT*8 z0Ub4p-Sexn;X`0e{mu|$l_%7lOcwl@$mMft`;QD(!cdjKKT~EDu1(H9olAchmA@>; zu7WRIh4>H5F#2?+G^3!J*V%EBc%kRNL2>;<;NFfB^l7KeA)`ijM! zPKRM%o?b{#)<^c0c&5-IcpIk8{C9Jri$oyI#4x#_!AGsUs#J=Jg zAdunVECxz?SUme{9DxiGM4oWY=*xWb@FK=%JNAQ*2Y-j-U>Z)PS=iWV*E-rNKLHl- z@?*e0K~PG{gEAZ2Z)g%Mq!ZL$+vx-S(kyBnNu!7?spPL(Ylr&w!A@t&@p8#NAYc|N~t z2gW;e4q*3bh#b9Xsz*Q68(0WSQd4E43Rrf}W48>O0A-o?4G6O4PwpTRfo#G8dH46T zR;cg5TaO8iF|lO==+*{yhKk1x0eTl}=JpgstG2gdfA zrMIi}fZZjw2u+>Z)+I}_Nf5b7yS`}UPHclkC(V1?gWQsw=Uq1i0Hi;M)Ny|uB$U!o zaUe0A$^qHIhU@o_xf$&Sc<{*7oW(#2tzm77VC&dagvLIOiiqA0k*{%4zuRvhDLKT` zVG6ULq3s)fUq$U|bYk;2sg3A0^VJxLjfMls3UJd`beEtA<<8}D>~)wdhsjk#orffD zKUuzyD`Lcx&6PWsit-$hjI5Gp@Gi&RF~wMB|RP~#XjF5Y5`YU7yTBI=-p9F@QH{b$?6$0-*Z%;2a$1ldABU$ zqZ86ABij=Q<}56A{x4B?4rZQIi@e%$cV9%o6_ zC2Vcp-x;ITC;$;3P~od=l_&Cn%DrAu85KLDk%*lTWdGTBrbMtU*}^%Z>=9q6g@`=N z9Y=ZR_<7b6+=3$Ixbz`7K_?s!*yngI+X<(<;h~GcUzY|2{z-+bbgP~3W;M~zZIvG=42?75Jg{jJOihMx{N(laOs`!rEM-M>UO@cs@F!k*Nr2<)JQQHs`C{{AV$;+JlBp1Ul=URnG7h zLvmi`tcVAZiIEvIK5)G7EOz7%h4_A5%bL`$;H3iMCw6jlAIim%-|n1MHU=rulTh&K zq#MV06r0l$US;HYIn6JWBX7j}TeM?fr8Y!rIOC2&ibKSB%@fkRNW@RCi|Q%DvePp7RE&LHnEcF=X_2CMH=~Y0-jeLG@O-@I&CTP5{7U4Q~_~Ory)!~WU;+h;2esg 
ztrwfYy=Ap^%n0Gh93as^J9=77OewcwGvD13EhROtF}tilJ3lGE+QQj?=)r+}SCUtL zf!RssgZG|nKZmuB*+S}3B)^Az#jaI+r=k|Vr!FE|0a}H7pX~aU!ZtENx9CBvO+Qod z&7%TPOJWq+0kXYShRY4HQ-utLw5`X^ZaMMUpyCT)m@>ZE-SW;E3n*V+$^oVXio_)PLJh?0}iL1`jA_f)n9-)<|IHk_hek4} z><9<8yD}77NS0>Ls1wu(qz`6f=qYjm@PkeYQd`~zC2*(uX1%JU&KSH|DRY$6JHrs8 z2oa*qZ%R?*{b@bG_{J54^qr5yiSO7ec-_rWYC!Tw4+O8yH|IX}{N?m<`1p2N;5 z3sqi)M9lhoA;LsH{}rjwLnGcpg`L8#n@vg38~!Ha!!hrT{jOw5dYh@=WWU$JX#yA` zYL>DpA4=Hod5yz7ZMqj(LCqxvc~~4cPcYk)HYEl@lF(nWX-O0Dj2-07ZN zuG~1GZWTO%1meMgNvY0JvJ^0#Vr#@oIaqreAFLdkl7a7Zw zs>W}fpX2UN98EuL6^z#85f91=f;HcSYon#-L7w9eZk_j{^;pQ8xHSm#j$ z=<8YZg3)IO9}8ThF3|En&t8MLuW7RaJzxPcEZw#604lI~cb5rKm1|ExOEpR0KfL8f zGhkEUAK%2>T{q3+kG;fKB0?9(PoS(a@ycnApQ=D-YZ&DLKZIBwXLT$Li61=Lzp&|i zhTXJWZ~K&fx*L+^k=s4&xlw}8nFC5nuA8@+G%`u2c`Pi7`XY(prqWxkZh8u zz$NF2X~K=s^syP*IHK`vu=b9hCRZI7+3Vpg$@;Zu#Q^g1wBIq03X zExVb(NFQZ6rhD2Na*ZJg%7FzLe==pZkLdA|`})h5o)O9|39rD`KPZD*eeVOG;Ax7v zFm-46Mhh+9wwB65KdZN1o`}n};_9=R3@xfM-P79bfwP-!@8nBrXsxk3C<+U`OWnd0 zUnSbX*?@4&S~^Qp@^X~khPqC49mK5sC>~lmA;tUuw090cx-dbPZri?X+qP}nx^3IG zZQHhO+jv{IZFkQ%G5>z{6I0u&RYp`+M&@%)IRlqji9lN5#%%hOaZk1s#7tJFwTbei zUmV)F!~fthsFV^gEbPi|ic{w-Gc7AFT4X%1M= z5jEC|eE<^v=HZT}xR>`XJ)y^eoA_`|WC|#v6wkAJk3`)RY;E6K$E94z1$H-OKf3c- zxe|;u*q1)|@J6Tt3t0w%WC&kEOP@@Tl)}#!>%{(LcwoH4T|hPY6_^fk4(sQzyE0tWcP_JvqYr%}v8 zZ4?<(u-`3DenE;59w1}fD_*nfcjaq`r6yu6zlYY+Pq5i3lQgujs{N=`s3pgXnR?i=T4Jk9(nt z1|_kT7(fl=G!zuN?)j^kpSKM>+pe%Cpx-$O5r z{?Xb=J!_~Yq=B>czk=07+Qp+)tIdXlFHY+oDxnbpfH zOlP8zxcCcI*2+PX^x{NRRCs;rw!or4~`NG<9;i>CdRuBckJ&|5gDR zl^8h>m_~-qv3%GAoixlTPjen@TW{OoNZ!jPjAe5i7x&W5bQ_W3bvF>Nq9O==$;YF> zVy{yTx3s{}YIRy>xJF)7V(er@k=r4Cc5iEED(dghU}c>Mbh8jgCk&_&E68L-;~(2T z1pS}lB#>5N%P3ZCpX$ohXXub2d_=VVknEv{17D^22dhfG23o0jRO`A4OEuZE)uyUy zIkjj+@orxH0!E&H_awje0WFg?=)+IkLDsm%{;L>VL5)EWU6CQ_PV@C*M?%1mC4skp zYl3t4k`MVo*(qW|Jy$o^u^aC$t*picCmqAzUbEkdVE@t|b2!&E<252C$&~uw;Qd`m z+(O}5S^KB4fOxO1InIUiA>o<5bGAQwE9M_M8EovO-=$Jdw}RXkC%n=KWceOZ&wC3d zJXidlJRIz2Y_IGxp%f{?GFCPDcdzw~h%Zk`B5c}>op0J 
zc4#Zmspv!Y9JlF#&*uR4rW!$X?&UL9?v9K;_r9JBeMyl)Jr~$|>MZq_$rmbC1_|5zYRfYpKypqW>sV2=ZC&RipIYH@Z!RR%*f> z(F^<}-Vl7@i_6k)f1kNdoh2ATW)UoMp`@uNly0O}Jv`h-UIsYG!OVrPeS79sOTL#_ z$b?mh@6eB+43Rv4E8q4~jtS?it#F-H5>j9sJWs@Zbxo7Rmm*QGh~}1+VOM%f{l9DG zxAH9n%#M85*0CF+%bH9_mUr-9r+2JsZsp_@kSBWj!B%);;{{tR?+k*&vhOhjV}HAn zwNRdyB>#Kn{X>0+{`-w=!lE|DduO8^O^b}xj}PYCRQ)8VQ{hoX1t}?A+vp+*5w9eZ zvyOGIlp6!bQ#f|dKPm{BX&0OlqCf$MAf)L=+=P#Ho`No4K0gO`U^`819O#4m4;D=E zYmKQwLVS4ub=4Kw-fSu(;oEB+`2oL$}LvF8=B;Ho?JNyk12YLd_rFdM?v$}uG2 z$2u3FVD$l;E=S0*{*j9)kp$`{H6g}$_S?ieNRhw$pgLOop7fduYJ^Cg#DCxuEy}nSx$Jmo~fe(F9bM-9jS*)})rdEy0XL z0HFGg@gK2jrcv1|I)%dnSQJ+189iQhXo#OjX;7@-!0&05lG>c?Q1;UGiAe6A6XSOQ@1 z3%JpQ8&YQ!*YSvnh%0!jP46!CO_N~RSfS@S3P`esL+K5A;tl>mK})<0bd11GX0tpN z?(-WqEdAakhE$71oz4czld{MuP@G~1Gw@EH39XgF!N2B2HA|260VP`Z0IlB`fBgHI zJ3$3>UQJ5=EZt1QU6{>fDm#<9BhN9R(?QAEy%p<|_W=(!s)~q-b)tmJp^hD(Ilu}V zEyuVwwSz*fHev`D=5e^|#=t8XlRl>BW6~j^riKZN0ibLGxX}95+S4`Ee&nN55tD`e zk{DLD+0Ve927>J*fwYGiUP=fa*WSl5H3a=>=1tcP=4Y(4EmKdTZFj;G*MN1J?)O9N zi4 zM04zpjZPN7>Ftg*T(?h*w?1C1KLsK0MtHg7C}A$;;PvX-HBoV^pG23$s9$YX6Qm zD?fO=UumRoI$8SxNmqPvUg+PboB|5YKgopwZGkHRPRuffa+7CEA(Zm{bU9|gR2bBL zZ&Az-D7OY>krHV*`I~;~zE}*yV*~)ae*1VFKV&AHIHO-pNswU6%v-wHt4xMohjeT( zv%VzEA;ntWo;7Mw&FGq=RxvI}-4~_tyYmF5EP6UAgrjLf%!6%I{6)j`OWz=oMlSVD zvW@{&h|>goyx-ch#&FP{&xVuAwOjkOtzCO{ZKphJ>=RNWjr+N?ugXWoUw{?cGIx&G z)kHveR*D{R+7mWjz&m3yQhRx0MD_8+FYRAz5RNL`)F5V-g;m9h0kj1UX7m$4A4==k zQMy~R;=pU}^$AzRi9rEfcq8&UA}}y!+oIV#v1TL5{Go$OGxuTfx+Q{V-go}>aC6X)0{%wM>!>^caWC5dCQl% zQ8%$dzBC`&dTzZYYW)W*6C&&^HmmTbW&~DpLhmdTxwv^3KkoE|$j~BHx~)j$LOym* z${Fl7#vj{~LW$~{TPfa$${zrFCXhGT#DoTOg10^!U>`-S35if!LnQt`K(3Psrn1R+ zKRsQGCaLFD3CpX)(_AFykK0sBeEsSQ2BgqKTNkFwOH^c#vy=<{LY%l_%|MYv}!UCio6bgPjQ|1_F$*I8pIL7FYijvVCJIz(~s z9mZ=ir;YayAeCzk=Otv7-HSDJV`n}4VDxyrVNAOHa9-I-xC)!5<6giD*x z7WElU#F6~r=I-sj6XI@y*FuN!Py?eq>dp_#!pVEOgI}tTvZ@r~_!snf@V33;_bRAv zZm*`jOjSUb0&tzE#tO^fGV=9`Ru97ZE^MuL7nHQ_#0Dm{Kjn2+t4Jv?mHrWb=w-pI zxmc$U31aJbzb)ok_Klw)bhU=m5lWR<`AYC#_g3-ui>SBi$R8$~DmNv9-IYyL8QKUj 
z!{TeFq3S8KfZg@RB4O>%H9Juqu5&9Y&P=5v|H$sFxTw&m^>Q!`G@&CR;6!N%8i8fnLY=8a^4W^?#lDxn0IHYyb}MbH0qbU|4%8E|bc{>Xd~tgjKu- z+Z()0qtHxm^RBmk5>k={tnE{&bB)%U#vKmulVA)RWwfrfjw$cB!hq^Agc7b!9fW3+ ze_nJjIrRM?t1OK*(_qhs0{=HzaB?kB+ECp&{mlrE;SN4hVnejYSKZ}TK7&dKHOmt}df{J$!poU-@hh>Gg)kXPj@dkC zOcxoQ`uH}*d62@dLmFqyZw+9{K;Ry1fV5XrNV?hN6#5tg9sF+buZ7k`P*3@;#mFW2 zEF%=b^b#Y>EF4@b$=y}}7fY$ifYDji%$!nvvYI}3`S6M^=MQV*ZvY|~T#iW!f>`?& z>|F=FNinr3wAbN_ZRzCeoW{U`VId5h8sWxb=)XvI{b0}!s3sc+Yll_bmK-=;xLlbZ z``FM;`sJ~zK9zr7YT|=`@eotv-uBjK>H(+)&h8~R0ry{ zHhQ(D;|M}_kq@-qxxyiBvQN6$@gKaN&OayM>!OYnsaQJ^`^@MGDF8}!HEelXFx*bm zWr4~2r)fE50d?DNmU0|<6xJx3S7}B4Fi`D3ihT;L0K8#lMb9cSGu+(G5yI%b?8TvK z?z=L?JNh@4VGctE?ro(!2{;M9Ym3ljEqHzz zda*kLM^8VXF=1k1MsWh)s^cTNy!7N6Qxlm@mxIp)TSqm=8s)l7ye+WfH)j>*juHu0 z&x>;e*snvATZcd6WtS2znBSxk!%@y7TcsC4oe4Nq66epUlbzKQS;%}h)rIGIUnZ7) z5wDC5H08)Y#L8>Do>ljrZqSfk6x4@5tL)>jSnhp2mr@f`0t6@&|JlW(`GEl4>y#P_ z@ooh@>prUHrnDe0Cn}`&8rYVVDt)f}15OKXTUkhYNH+-ye=%PH=ARo3>1oHo1Px{s zEcQvnF|xgpalVK`a;R7}gG8TWJV<73_pWf4_t=Zz@LLc-WPSDw^9_OPy1K%*i^e6G z&>8YD0KY8(nwt4*jnzpF%=XzA_A^Jm+G~v zw!dast_egZ7s>*05QO(r!2LLTN|cesu%?T1vHb^Rtnz2hB%;d=|HvYcf`PYAPcQx= z=n!^~z++L9Bk*~^caBnklRQ&G8@_ZqUiNxQz=^DOAb{;Fg;Vm^2f$f1h30hIE{z*K ziK39BaSN?01de!H+z&>kFf9oXqZ}cU>=3$nYJNS@^(BnwA1UA(ZzHG;C%K;-&2EA* zktDX9T<&99QyDL>k&=!k>=mxpT_$KBh@A&YE6|89bIa3Vo=0#R{PtdG zxy5vQ4AJNgakWRd^R(~14MGo?1-pzV9*W7Hks6$g+J(o90lA?1{P~$LbR`ed!%A2% z$$YJ5C_g&}A%dT&T3K4UZ11@!e@S&X4q~3j`uD>xu|ZYZ%)qZB1I0eT+j-zBWo3bq zW6R(`Rxd; z>l4DaR#a-!SM|Q-uJeAnRHjKA((AcMaXqg;h4Twa6%7%(Rl?cqud#3tg!m!eOxP;S zT?rMR6-=m(7{jFjPFB7*6U_EXq5?20B~>q$kxb@vz(&*W%pY?PusrJLw=-=`^j81` z$|2Uk-JLHjWQ}xe<$#y@gGai@HO@uJ$PhbwhKETXUpRgaHIk<@L7%)SB=SB>=no(O zN|64=8}IBU8F8|XJ72K<*@R~ z|1kCjog*=%no-s9ieG_By#q4s8d$mOU*oiy^=Xl@mA7QwCs=p+ zP6_zBm>&Am=4}@w9VW%R5id4LR+K#wKFC!ZNdXQc#(3|vvWGN2s zthhMPFYe7o)*p$??`7mN9J$=_SXM=%Rl_x(q0n*-7BjphP(1*9NMa`!*wo zmI_5C$3M_9khF_xLX=9sR)_S!uXYmM>HJr+Y5hb5L2B5-VZ|^>nd&)db_h2y!9Xo) zBB9-7gGObw5o>e{mj2cC)kigeaPjEUv9v_DCWge3kCiFg5g+Oxdp61K>F1T@54u&9 
zbqg%6AQm=#MOQfMl~r5CNCjlVoHfm}8d(yEz7jx`?1p15d__P)m0=9Tzb1-qDrj zsSVFo-gI@EOfd|IsMnT)lEYKJFW1v)Y+ElCfBBgoQl^Fw#O}82LSl4COwg4tdQ5g3 z3P>SqXXNxfcE%D7jp+TwpYKlPo*mjxNlwwSZ~2DTE;{fy3aa$}QKdgDMS5$%jizeq!4M8XGV>@Qng4wEF|;N;t*%855sm>DWK9v#)Y?aV5Z>%m?mQ82c_oF%pCSGfYXF>$blVn~~?xc~FO%h^b5m(;f|+zIAYU!C(bnXSdjc zqxB7Rk6!E$9$0_*Q}#e+yDhiA!y7JDEB@(+i^BvTeix>*D%rff+g_F&;;PsKlx^Gs zMecyGnGNnY@aw45W=?*bsWZ)XUCV2|X>H1NSgyM}ENebONH%srQSn4o{%ifg?AP*>Mj`co=Nt0yVwq-l~0lx zne~iw=Ws|X?6{Z{c?T=|1Id?Y54xC`T&Q84A*}{Ee&T)3n$@4S>R`f4(YvsnYa1*N z3cqAPG1(byX`4xAF&cJGDmhQc>zV3 zR?F4C0+yTqe$l;bx87v3!R^SOHlivW&+_?~-BFQ@T#rB7X|b05Wgi~Kug9}3b5G0} z9&O(HD}G;1%d>SDUG4yDzYRiCA;4+!ID?X8nIc!E zl#5_cOjAS+(x+`}xmslu$zN33F)x#vGx56 zIl+8(B3Y-}L^GXQriNRO9Eqehnu~9p;xfw8Sc)BBTQGUrMAO|QY;LknDedZ;_mOxcHSgmo zKw;x`_p!X*;|wJ~wIS!i-l}=#brWtly9xI&?6+biO(?_IaI3gsphJU`nY12GEims= z5`vB%M%(68=3UaHnq8yShClhU#{eZ%e(wr*!sQV(X9(LNpR?=iyAgGFG9Py#yXv|+ zk#&Gy_RyIjr}fqs6%omYx(k#uQnGQ5SX?;z8-;01lXSaEftAu8r z1@=`vP9-`7iPnd$dqNR(m0sm{Ye(KQ^>)SH8hqLai_CsL_PkxV-h5N5yIIQe+f0sqfYHMtfF7&V^6i0l?|AV%z=^iC-REtp&0Q5wJ`KjO~QmjuG z&G$EuR-Yn7&bSOsPRy{*-6lLkLG^NrEm+Z-EoIcaqM63(ddFftwwIr$?BT~B`+;q$ zrzecq-xio3_LUtLVLrFY;E^9!XmG{{j@{ZQ#CUM(-oz2o>HqSYg|h+c8$xXq&B{=Y zC*WRj%ULlWTgQfSkDGlf*(M8SOdI~h!ZkZ9XV9l?#{3Gu!%_A~Dgu(GU8NR4g0B^4 zs7-KLguMgNgYRL{_dm`LrjI1yE$CTIzvo&zUpOTwo^I#}HC9v?m*|q+H`@e7et3Vnoz7w*xSF?K|TV%skJ&zyw^6IKaEj5gf zpHcTdkE00%p2D5j^5W+`;r0bOW@)r~cd$b^E$}Y^au+6DNzM2gS863Y#$hF0u#`Z< zi<2v3&Ya`o)OG(2-7NdqpvG>y?dG_?$x{Do@eu>NMH@p>Xr{( z23|t1WxTu%ex_^aNbMZy$7!?O6drfhni2mVVz@Ij|KO85aWL>IY8&dRB7GH!c(+5S0Hu+TKy%y$KQ z&6x}lg9{*T?KYW+>M^YqjzbBxq!qCJt9Y^CP8p|FAB}R(Obv(KK$quAo?<7wwCPJ?std34ieGOo{Yv82lF;G?QsiTIy@wn z-~~$cATfa>a%&>KDHo!YrrYGuR*@RJyI?1~g(li)2fuXlH zftac*3g4ZMq1{FNY{JZJ^QM!YtehR!(|Qb_RUvq*`l!qfX7z{Pq(DXsU(Bd*%bH@Z z`>!cCk`XY8ZmrZNqzfM*ps%&K)i`7@+TN!_!l}&)g%+qcE8%!?!?0E_zhL+a)KJ^ zmg8h;<;?qYZW9-eL0qbp1TmAQtMxBM`5H&>o(4s(u@5|*w!M6W#BbfzXWynnkLGia_66BTZGf#!hICs$<=5K(8R5CMu&~>HDZt9(=CWk#_S#YHg_t 
z_j$cBv~lImm*3=tuIyb9QXN>MI@fG}8mkpA;DHRmp{Ipep(zo@Gt}|G@4sF934ix7 z$!U)|;>GKVENjyJSQ{!O0lYgVJjiHf7H>u9~&Pga9oXS?cs++h$W_ z+eq_OZu2pI%sLawf?PJh5_Kw+TLWYjb!PcXJ<<-Ci#@DZc4oB5?t2qOMY*I*y=}-o z;hLbU^ZUyw4!gDOmGZOOuLgi=>kkm{-&`S#cx8QSl9cpa_DwW}Ptp zTYZ`L^!`adI9t9tQQ8ppSP)+$(1oZX_+`>=U0*v1ygSC4kYcDopo z%$wC&`l1A`EVvu=-Cqna;RI{DTvKa<0T--XXX6}F?zjQH>~p&aG6$9;u`7pB5JBU? zqK(Oj_9Z0BeWOIR3+$`KGcf$J-(jIi#TW#;zxs1wRtYoSG{lpTh>IxdY!+Ac&4hRi zb7Da}3l41QoWm?dXP@_`Z-qpX@HEws6S0)Awv7__Kt@B!Gpu4hhI5!>7K3_gcm67# zi8>zsipOZqr!D9D93Sp0P0ikMez?{!PYE@%IxiStUbuu2T%9b28mY(9h06ieNj+_CC3D z@w|&^OJ5tL8k57;1CPOYIPtb3B>{&IQ=)3tA>LS7Ye$Q-hQm(w2C{%0SSoag?Hu!! zjcmX`mmi6G;;0m4T1pq;r_JS$y)fj;T7_|E_`z`+YjPJr`He0U<%|yXAV9~5 zq-CvJ)s0$S$hyUV5PZBioBB6BeCeKVdMV06xJ{vjg;*mW`4q2dAIc}HLpNhv#sYh*J%fsGE1t8Mpfb z-m;9SG2IX?p-lyr*J##>WABnf5UTBSp@K^N?_p4jFg@5OvXEN=jk8b2R%8@w?P}Z9 z+@^%7GTOR^I0JqE)s}Hr$poBfo%>`4?=zXVwq+UpCZ$5vx_1gJ9(&* z3Je1m9-G#ewRtOuPw4U~C4v(QtoC>qRdgL;nTBV&l0R;CK0w=&v-UH1hIJucpJqS88tn%>%vVZ9qtobvJVe7gji;9YTg^aq= z6F&}5_=!AiNH~QIED|`^16eJCImFQ%c;koU;|*=p;|v44y|Q8|8~URMv;Ql&>-VFD z;)<)QrmzuRw)lGq_^REPRsl6DqD~@|*b>$QOcc$C!=9yvdx;9;Xa;jLvQkslF4>Ql zN@wfo?Z6_{x-)1>qyae;w-`JsSO5ix#SM!1S3KD}AmTy&4BHLT>!03$=9snWu^)Lv z721bvoR$QU*J81MQ3Usr}KjlEt@-J&odWMbxc3J< zmO^L&3PrvaXTCnM2Fhwycm)Y`j`jAFFRZ73QzW16IX|ac-$$|N<0`{^$YEjRZRzH1 z3_*nedb7l7O{u}CJ@sccfLS}Qx}$o~Xc-0tBd1QL7iYqc+T()XM`QHL_l8G3tIN4; z<1`fhsc+I!=f7t;Rr0RSbjWk));!bdLOY7-J3TY7^Qz(&>eV`&&my{bo!(ZsVgTnW zdF|O)^KU3pfVXU82x_QC4qUMvt9ho=926CO984rNrH%T5o3Otn3k|I*g*>n*RZ1aQss9WGv&Wpmi4*j;xT#v&G4AG*qq)|*Ed{%^hMM9m1iTGDS=Er>tr zK#j=tnG?K^kMY$YKxYL4?~3mpPYAP09v0CK?ud-DxSt~Yk%NKfr*=Uxw;oF-k^m5* zy~>5grX@oDlP=NKZ#htmHyi5E0@0k^X-;V2_{)&9<8jUzQ4t=m)+Ip=210<1P3V;a zyk96@Kwl&TIfE~6@62rVr6gJbZ!MA2ViTfzA4~mvI+(IgH5XD$(}27S_sR(Y4*#m* zCycrE?Kr{+{ZKCRPOoA?seqRlf@+NjMwxXO9! 
z9+SIz(DCBq@GsuSx!SoV62#Z~wrRwak|Q7dXOMB`)6Te$`2|&28u8F!pKVyE?ZD`b zRwKF=mDXd$7!X-P-ppr5=W1p?_SF}p_f`A@{3+gpNWB7pB{tHZ{}--*Qu?hy{;-J( z)Hzqqp`~>0QNte&RT)MDBb8(&dfq<){*oG9Rt$KgoOm(Uj0}RM43kz;Qc*-%Q%g(gpbRBrX4BE$$X0=aZAG znKN=B>xlS5W|MqJo!Bnyrq?Pvgvq;mTq!K_pr4nTOr$U(bC8YJlEkw?xU4u}2%8Ez z)x_%a^~(FFx(Kq1?KANYB4aZ0BG`fth}IHk_QKX<&qKz7-&n3?Ia8Y-F)zq-_F2I~ zz|LB04stnMo25mzR_C_Ew^kb1>iu335ZgZ$Qx1uXsnkaY5y)L9IgqrxO6>xGVKJ#9 z9EInx%ljeRdB}71r1FJkgh|BQ3f`C_8OQAQf;0F{utH1@$h66g|j#hNxiY##ysqfojCOfKah|GSwMYa4N4Y#cwc!Xj^Im#)+B*%kNWvNmA`(%o8dupg@x*K z^qWq)XmlnrZB)q)!|N3Tl$nxDnlE_lA+ND2jfy~s0vTB#_SOIJHg&pc0$MG--*NAq-y+!Dfqx12ziVU8Kg>W1f6*x-m z`;>KT|-n^h`rC0<$kl3BU>`EE>FnYGy49zs6kTvl@+r|%aRUsu!Ynb!xk%Qb)ViDTAjo+)UyM|2}sq7bi*?a0j-3HHr8p_T0fZ?>mG-SYiW zt;E4v)2q=8jLYvk+*1!3ic;%kVQJJLQI-pjjY02-tOz}B&cgFe09EXK9Ah864w4wn z%T7_AENmw}_Nc(hs?44n)z-E|?nvgCP=Or_@1!{r^zRRRpAG0^a;DkehoEUcu_Zkx1bGZx5 zzTl5aS&E4Kk`j{h?B(d)HextLn~DbvoT-5+4ybCOQB45kJz)~4W`bWcq`KVqaRFf~ zk#1hsWp4Td<ArXC1|D1I)2;=ZY3zf-8n3v%gib0s zdGmOq%yFV)IK>A{B_J(5g~ie@s=3{D3`HIzE6e-CJ4IQy{CUPae(tl+$LZ{b5uXfw z9b#YyiU*8PHBwp*Q6?+`>>_PkJXVk#UorMza9uI^f>1?iRr;4R6N!Rdy6Mlbw2t;v z^8yl#nYoNx^x94gGbJkLqrv?7EXWV|i=$KOxx=$txNFE#(rPcwC(MQX5Qdh62qwZ& zW3wYhAtU>(!uz)v!7d%l(BMWYGkceln%i5pdQFHrrf~+4loba`k6U2rzb8RLs_7?S zCRxMKfos=lM(ybMQ=yYWAuJJ@-CZXbnmDZ9B9!J1xa)-34gDl(;f=E0j)7Ao)5&L2 z8%wJ7WNYqh95uF{kf3&`&~bIOu=021g;3A?Mrc?xZxCf&%YQvkE?4?2`kkjZVmJ8Y zTKFl59ww$0Qqn0y&DDk|ulC2pIZU)WKAC*0Q+}{KVp`qyjAz8RqK+Cp(4$KraF?z} z1Kg@6wNPurUeg(5E9p=I3{Z*^UVeAKxk&P|i#g2uY@^aVyPoNc>AnM}*cy&=r$<{DZq%9O zd_ix7IoEbAV75|9uQlSzEpg8W*i z=jiji+S8jLY(sJ(6%niYe%W=O@WG0GlQPz^z~3Ypq{k(ILUnwG!F=ew)nib=w-+Jb zANJOZu<8vH_B!EQUoaI60%xyPKh2 zN{dy%+WFfL&mXy>xtZ9D(5=7O7gP%y@hr+7gA|*=5e{_T!=NlhHEU|Quf-xxzoeOL z25CZGXL|19TNK^SGq0~##!lcho*gT_H)b2gszZ$TMA^?Y?boeDg7L#1|B$uEptr*g zv2K^-vY=$gDS@=Nuhi{0V~}+L^;+uMl5h%BlW}e_$!5o^bv_rrPqVH3H5XKqZNR+s z=UMvz9b$BJe1C%8@9{A#w`LV}@y}JR;IzR^k5@LYtT1d~S8KUtp zo3Yh*QM2g8n3LI%oH3xQjX9G_lEkb*$@rc60HPk7{B*k|H=YAIq_*(OQH-FdE}sQn 
z08TiwtCe_%QMQ9HFD_bJpKB-e{D~g(2BGu?!v#I#oWGkGhWtP^XNGo24rgbV{$7X^ z&AL7u4A4XENeiD-mTRK+4;l@uK*JVqJY!7fhC}`f{PB9Fe>mKqE;-X~k8OSW-SwI* z=s`G*I<_`vRy<%TR8?;eJ(Ev+4mkp79viMI{xCm%R#AekU_fo^6)sT};2*su3I;ch&_XJ|Dbt6O#EI-%Zh%{?eMY94Iq_+~}!fPq5A(TdBvYDUunhiMub3=3Zkj0IzV$ox{p zCWaPXcBAaHQxuqDB0=Ad4$|kJ8wp%JqNh9@x3S;rl*qW6M)WY~ATVx(tY^)t&qXjU z30^p4pG>DfeNvN3xHCFX(LSAxqaVoFOEWtHNX&t?rF*9&qZ;5ryf-=uftqst9P6zU zOM9^ddO{PG;TCIX22vDXIG%4ao{v5C;btm7U$<-Wt`0;a{vL2j7@Xz>2$tZgH z^H|>o#%Ry&kL~J%Y?Imf>J!`*hd**_Sa(@{wPzT+?c*;DXF-O_mi?LaC*j>_y3$&9 ziy)X)tYePLt5{8=ul{C!!Df@viSKK!(R@%S7-IO1lb!&UB_I+3rfZ*WujzVZycXnQ z5|>$U5B&maMPEcw%7G4qmd(31r1NKWgMG2MW97FN&Meq(AJY{_<(_*25U;YUFkh7X zPbEYN^E78JWfl$j@_)Z2@up#=m@dTc5_ot&w-pXA9alE?>t)6GY5cZHKdNY8{a76vP=I@Y z;NH0EwgoHYj)GW{Dg$COOILxgfx%mL*nJ5;Z4>msJlKq%ack~UUz^7qi&M|J=Cmf~ z8;BTPsX@EyMb2<+*PO4*Mfaaqo%ehhnS&>u z*w7@gvcvw`hbwE1l0Sg$*c;(>v}I$_VM}?_gFwL&_*2w*jOUad*<{W+wI$QZAh;r0 zb+TO#Lm}Ud&MhUgR+k=Se(tSsGse zPuX$2V|ftlsg>N5jm#P>h#qElz=SW0#qj9P zh5rV-=k)YE-6eE)gUyoFHCs9TieF`fUcV+rhTrV5HVW1sj0A=_F1Sm$1i+mX`xzn_ z6B7N9fIx!y1ViFToa$Ii+Ve8FQ)%K)d<57m;RlVPy&^~PSY&W5hOJe%`(krJW)Qrv zlg$MC95p6ZvWj$57y&mb?_v#wGDdg2cS6!nP;!K6V45?yR=)Oeo6Z(ElAzan`k@Lb zcj?}LW_%0j=EvG@w%BBE*X+cx2p*{OlleS;CKQ9+aAZ8Ge{l1M^oT72kCfw{o6g2=;xJYBFrbttk!P1$p56OOW_U=V%Uk% zd@TVBd)yaD^RcNTPWJ|{-E^rR{MDI%>eByyWNDqhJGndk-xcxz4&Yx!IUM7^74rW%@4rKgA{M5m zCXObyMkY=GEdUS*U^K9|ceML%;^1On{f`>{vs*hATMHvQV-q-zzrOoV>$#YPqm%Pr zgZ)28$Qb-rK`1#?SoE=RJY~a3w>csZi_5)Bv zF1-c`6NH1ZfI?-8qStw{D$PWG1~$Zl@S3}UA6;M9lurzhCnf?7_6t4FM<`>7@U3HD z#pI(MU}?32Kq=+(`2ug)&x^~JmqmzyYj)Cotv{Ll6v4jwm>NrV^llFZgT{!q*9zaW z_HMt-suk)Pb?ina=Avo__A~1}nC`Tvp&Ey(?1`nB6D3*Ow}=?U zA<9xI&zZY)TKez3Yw$2ddS0(t-hHvQdS9eAf7+>!*GY5AXbaJ7z_<#URvAPY4|MSX z@#(jh{9khD@2vgnHvK0HL`{qHviTJW=;Us|JgxE$nHBp zi=KlMK+na&24LZ2X9sYybLqe_N;(@@TNnx2npvBCcEu>@WMuN0OPoMX0OQ|Y{x;~j zSU*oAY+x^8Vqs?P4B+_NRB|@4Q3bGl&Y1fDgyGoOK9BhumH@DQW{#}E_kU&|!1B-G ze^XS<;VDA)$;k(_)0H{d8fC@<(!Vrj{C=`Uv z#%(0hs1#BILPI4uoG~DdVFbeyL$aMZ4zqcN+CMhG^Z4F#EUr0C+Erl^BB29ORm>XD 
zh2W$5!a^eg@<66Gd3x|-Q6QDBuTn`JvO@)kr3m367VyHv1V8A?j8K`@jy1r4@a(6y zKY^ISAOMg+2gvhKxTwA$eC^qBET-uWHK)U166sslhmr4@K>RTXpmP5`lL|sNJC6tr z`TqNznjXPC?Lxy7VlpOMJ zcG%xAz$hu~SCr8AE28hOg>vmT5_j}_aA1c)az*s0zWwCdEPuz3U7?{CJ%kDsAt!KFZhr)uZQ{<;~=)asNi@?)Sk;2)eZy9Qlir z3?KCB1K8_(-QdOh`MQIH50giMF($?Wvz`y38j;sbj2^pCd$H&5Z^MVC-6U@}3~ijL zlxS7qj&~xNVo#?Y5_a?j0)5L6VS(+S3{3KN14NH(i9oQ^FcD&zUmGJirx2l`IaO2D zc0P*}>#_F87=5dFvClt)zH)t#Ldjy_pGS(cf}v1bfczNj1EcCKM*krp2`ab)a@`Oi z;CCf(PEQdov@5p{8i~vR!Ks5kmLn)!0K)tQDp2%=)b_E5*B8ro8*R3j6D#80NoaTN zJy_~B?0scpvxf6A@f@Q69g1upgqds%GE8U(%qLL)>jZq*^`CdduX3P7^7v>@ZHB&g zm??a0LH-7)RPsUa7j{<`A$nH=+~ZQ&JK4VIZ=*bXA3x`)$MM{`kGkD4QsTKF`3yFE zwJ$eQPTk5(IlqkI{BU_ke8opugZ8vUc{rVR;g0K`Vi=jLB}I9M;oM)M!et=Z)aNqr5IeCUMWRIwka9bJV@5yUe#}Y3qA$5)nkuy_>%hn)rNh5^nF$C=i*Jq9)`` z#@+)V(W0X2DKla&)81YnYo|B7*&o?h@MB*ZPaHO}o_22NrhMH8B8k{vx{ssF8^^t& zpt9$s!?C%~CNQ$vK;@SAo1QiTPJKnifO~!@*1R}-3lvIVNXdzVACAqF4jwU}lyx@9`=OEP#s?cpf?;d01^DYO;nzVhLOLEG7}^O!AD# z<7pRLrm8g^`|?{oO6$6YOdw+KbHWR`c`O;EUkkNgneF6B@egvx``!?5dYFi7?cno-LuQ1fv#IUw4BpwY0)i%qR zAO^Rc9<&|$u~FhR(TShS@u*%kvrAwxWjNfWzl(ydLs>}eiEBJc1FKVweVcthT=Ev@ z267%FuN@Amp6I=wSf}o4%Ml_Lv|+K;G&GZz!m#W2tAj>OU81DK{YeibsjLiC$O0(1 zn>WK%vJ&lvS?zO5PBqtG`bm$eYu}s=# z^4Yz)tEhd()VZY}0<6jMQjR^zY})tSG^B0ZJ%}(bPJke5haewm1PaCrDZE>M#>lTbmcOfeZrISb=#Dq zA)J^BF{UHh@A!RAIqxRz9DtQ?(I#@E ztl>Ig#1Q*TI4vvo58%N~kqN>$B@u?C_&eUuSBk*w_|;iJok#!Y%G`pJv8=ueYdB<; zj0mh&9-e1>rPSY}$!04?{KkSTL{#iDArEB@1-;=)EHToJ)GOEbk5-%+#t4ny2SW#2 zH7<|Bc^{uqrfvkU@=>_IkK4ylEkjyJv&fUEeWjbDZ#_w)s^~s#ad3JZ%)@ycu41w$ zLlf64=}r+!JoebX7G`S^;D7;pCuyHxO!TM&CadDQW06IZHagT`S4UYQw9t_vMuX(0iW|x$e8WtV>{I@* z>S(>BiY;=j5#ss#b16*{t&D34Vk=Pv-0B&UO1h%+oYM zz3R675MLDUTvqQ8-)gO7&MffjQSa)ph>|v4xyF1-|L-0d$LKM?E6ep9UYP00ZywKE z9)oW2k25a9@Y;0{;xpf)lyv1Iv~xEWI{a*x$O=tu+_eJm@-1|hjD@UNrr&;DPmU;6 zsjWcL{}AJG&95KDi*HIP|1rZb8abxcbHyn)l^QV`>Zg65smnlcdE?;<6zfz=3)#G~ z z-#hc?MEsYGk$!`?A_vuyiY`Mv_CHrFPExw>Y~^>e#mv~0IM|s7*g7->YcGpMGxybV zYG>ws_4)e@`nj97HTHmdRU-P1hUOA3b!cIF3a#UQ0%Zqm-CC~9aaFTkTKdiK%!C%$ 
zG~M=Wr4&de0cz&d?0xPjwo2Z277fOZF!jSzJS(g&rDwU&N%N1n-D-Jp&M5+A@!>kM zwxROSuFq$MehdKleR~)@f7cYw=0pN+;?$+b2bt7@y*X%;ZyV{l?47Zp3q#W%Ccpn?d0QB~H{m!Ph#8`wxCzXE z5KKti*BzuG?f^5;rAg_ZbWFcm#FlIW^keJ-)5TMg6nH$_0CtuQ7=!HS>oOhCO-Jsl zRe;B{Z3&}&K0d^EaA^`!CwkC|`v^nBI&$_fj^I>{f7gZYZ zx)}B|Tzx7DPYzlNSZcDl8)e#&sa@CY<8E&c2!SCzq8b<}`Ln1YP1+JZ`s?gL)ZGXcj~s ze)GEPAD`H&0un@dyNlW8Bju&D(}B26S5I3G#u2T*T`Uh8Pch~laAS~^U&cdSw(Ut> z=FmH5&$c$o=W4{RwC?|~I-L$~qEuMe-h}NqR7~&ESp&tO%3`~-Kx#y7zku^4d3;j= z(dMsQHH-d!p&Kf>XmC6bahFX->e=NAnbYwdOAJM~=iH((svr5MAy-rt63%hpZj))x z3EMJ9whQ5d@xoH)p3qO=5jI`Ebf++`L96CZWmOi^y*`C$lI>ZyZNuJs@mY&+)!L!P zh2X6zdOiz_GvAVrD=$g*3chr!(@Li{E&Gf?=X;<59sL_yMN3^AsvnX%X(TUUZH}0I z-=!|MV!tXPh0NYi3a27Ut&xZ=sE>!xPkuk`*SAw3_WX$8p_W=J>YUvc&m_DZQHG}6 z)a1)t{!$QMz)YupknrbJ@bG5Uj_(!y(x5JJb=OJZxdQAfR6;bzW`R&Ey9)Gg6$iyY zCrTVfpjLfu7d?5i6MG-^=+O>6Wodx}3f1e8+{ttJ*PxI^r~asS4lSO@do#w;Q|z^j zC`ezObaYG0?uE14kpqh#lL(}h4vQ-`s0LhKxg2U z+a)~0hjAB{z1))d64|}LC_}%1m))x5g}l>p6ZIOqmm8H_REB$1k5S@;Cn%CRdA-GRmRC)5Istqi z9Zu$^d$ozx@jbIw(KH(QF^-nGR--@7qWAlPnJ9=3LXcSEkBf=Ia`j$BT5Uscwr*p? 
zDV2*^OOqyP*%cB#&mSXLS_!Z_eyhC3IU)1tZ#Ki^{A=o~C!XS z-(X87s*%cfc*qX-$REmrHMdgC`MHdoy0qpA;wN&KordKxo-&+W{ z7u+`FD{7-^+)97@&r~dy#5qRvuU(BRV{2J_Z+@~hpLR~IB7YHcf|!quxp;c*Vo7#T z8_#&rN#}vRC1H>Hc1>#)aE^n)zzZjbbk1FKceI4B9ktV{Gaybsx*{0XHf+cJC-!IN zLwUjxxQ?Y%w`MH|(wCoa7)Rx}Iq5DR8sH(AuaSAMkgEIEO|FR8qRKNUSo}1~OtGTP z-o6oX4AF0+Ba+tRGAc0B16|}ll+>4MVWuLi)V zJ@$OdpRilM#P`aCOh~3K7Nog-Ys4z8HFUd#Fmm1FR1)!(h$j{u`qc52$^e1)-Ia7gdu=iUDFWa*kI{ci zckpoN$X*#(KKGW7oQZpfr-$Aq-;h#FurmDyGh)i~FtpTr0u{W^QzTTLLRY(}H6Gt+ z6yU?!J1iplrJA$W42Qy-m@YEjRUu!aQZLVABMX!Vq4Pe*Vz)sF)on43J?7QC-l_0j zskkE%^I8{?F{Gqb9sJZZ7%{UTnj^iL`-7JL_4}ibx?d;>&|I^*j-Q8{jz&bfLzZW# z`@r_T7TsIE$H?Kym=5Ma=SnvBEmF~br%v*xA~f3XaCUFh#W`uKJ|p25sEf?)1oAo)kAN4Cw{BfY-78y zH)R)zG8I+SXR6Ys&$XeSi)t@h35#Cmn?wU==%~RS{O)uGVTH-hn&gVSM^oG*v0q;kLr&14t8IToj78&=?t% zLRh!yT#2C~J2OyKlBw$iVuo{V?Xg-jL!UP)!XI0D5+=rX*5yKD|6xp32#zDMD3h@b z*#@Exl}fwXY{o0BHee#Y2#P)Cu^;GzE;p$$H~c~P)U_gtqYuSFEDH6*N<-4_^~FmG zo>1X4_f}8zTTkhbnCClcNJhNjwO{PDEk2!!Y*QNF9@mDB)S>pGoLvkPd`<7Br zqS8t_w&>_DC4X6-)wv!>ufn8>b*R2lEG1#+s|TNYzBojXKpe6vtDOW#U83?tZ~PM2 zKe7*Zhp%T&LsoGy65GjF7jlrmKQh6{JC=Kl0vxeT z8%Wu{D~_!Jo&(QPp*j_lIJ_49*0qVyQO57h9Xb?xaZD66KIY78#TlXcB#`fn$osyf zfgd?n;zA$dj((mb`s(;A@IUTyUx|dNcvAnD68yhF3^Owe^S^7>f6LK-Yu2osY@GiN z{;f&>4gNRA`PYvB!7cv-YyLkljvTG}U!vk{;z+M(XJhcGE&qGif9n2l99;iX0RIoX zku@;-XV?Ek{sU%MKTGrf1~Y$)_WuSmY;5dsOrNJ>0(@@$gLwWuoWDK%t1kcli#_}= z7yS=}0DgA(_wxTz2skbJ&)Wl_XJumpFtc&71DIJ@S#<#aFC60YmVcsze}f2Cmj4D3 zaO{77{2vg(`MLFff{0K0@PCFS0IZ)|{{a!qpAhPw3;72`aQ+?ie}jmB0{ZZ9ymtVS`PrIv2(sxdE zI~STO)Z{@S!{TWIYtjN?CHi_dlKj)+!k}}>EiKN@;2cdA9BP0~&`iL(8YWT&g%XX{ zm4Nrwh_88|j;^(iHHT01lqU!*u8`HCIPC$0^K|PV*F(A>SaKjhT4Z>1WFWR*@G)bs zFDy6b4ai=l41JW~=ZlMKbi`4>f=dE_rqm||7t$hcJye63+S*e1)xv{Jj7)CKOb#rJ z4Wb!CUD?|=fu?>36{*nH(e6z?2bsMuiu9c$wzh(Ql+J?h!Px_IbcIfP)7gqawN%1o z;-RqhxS|Bk>E%rLde?yjolj;8>VhDO`Nh|E2EJ_4y()nMX~DslB#V5%iy<>HyMj8o z)_e#*Tzb4?e(%xqhjxOH2sjh`e1~rOo8up~A7uEM^k2KdMg|7PCZKYgoLyY@K*1(^ zwGO(QzUWNgoC{Wc#kKDnIG+?DI6M1#U&R13A@ezMB{e_Tk-jlP&`0T{^b}H<9t0-v 
zv=8`kymoFacQEI)FKMZ%pfcS)J|9KM(G_Wdot2A34vMqyGtMK30t$y^^I@k3nd>)#vj)o zL{;GAk>?fGo$m}!@;I`;s%h);Ywm19Qk`2`Tba;U+Zn)vHTUVU zS?rS-?8CAs)j6^?LZsrYw)wp`R@vG?IW{%cK~roG(gh&JGd8LGdlL{}c6xST{!b>u0>|Z+zu%{2h#;t#__3h0;#C-541nE<-4GMxaQ|x`S(N+MI|?Q{+EJVIlqAQfgDj$vg_J{-Q=mg zE?}(hLp#hb+t6TwgqDJT~584w<{8=)n z5%H!XFS1gfjG$~cxNxRu_{(&;I4`5r8HA-k&VwP_9S{3`_7|Snr3#9*nhmi7&f#ih zHdAnB1aFOoAUnKG!O+$y+vsv>bT{J28}iMsA*!Rx29ClU3Ga;O|CyI>atr%HJ8 zuvY_*S|b>4y7`$$HoepG8|2%-p`|18JD9jPj`R_7S&yWbF$?(nNR@|V0X?r7{cE%` zSqp<<$88;W8B+xV7i{$VcIj1b6BNUNt{RPUIz-fY#9{4Xj-VLVNanDMnwrEA*;Tj~ z-@_d#3n@(%_b&s#km0RMRKj-Osrr{!R(Dp%(w5Vav{`l3EvUY{P|$bU$Wc*%M0Vl( zo?@5ES?O1q024n_n@g`t@ln?=RDn6kN0qx@5LM!zfy~e_$Ypyr#iYqLp7Na@4H7_z zVPicc92==yVqYRuVgZWV@)%x4b)TqCNCGAFFea-22z=JN`8kCZr$u*mwLE1OgTT8+ z+ZRrO4*k_;Kp}XgsMA?dz*Gcqsa*&BDpG<<=F667 zmsWuj#*SA@5(a6}dYrKYYV9_3P7?B59D8#JT#n1sHtGqPcN`I6`#tQzRW^g>h2jSV zRF~W}v#h%uXcAbz1#=*C_CA+RbqSPifE434&sS8fJE#0{fj4IVJUw=_*xye-rby6u?^2Bb@AhU>RW5|SO}$;qZO+!=QyMca>?C@)=_CF_60ojnWN zG8?ArOmV)Llvs+N=TiWX*k35Bk-XAOD!3Fw&rSdNbcVo<=H&vtyNDsNd#E=sLs%G@ zN5T3fDIoaQ=%nqYj512!;CK(~YkJ|mQ@vj&+{2QRdXD{rP?r@ET$;PZj}WdqA0`iy zCb|Qb6>!&wE+ytNg{v2?`RaR{M56D~aXfM*acgy(Uaq2=zs+VRJr6G&e zg+jD)vJ2plJ2D1JRikwCZumJ&Kdbxr1ZA}B+Nkk}h2+BzWbe_gj&9Hx&LV+kXQqk6 z4ICA)pK%oC1}3k|vtb7)RZim;&-@z{fjT2;n|4P9>3c#sOu^%P^% z0k_K_fqN|jfMDr>rnu%D?9e@T%~p>Sj#3f=jKs7514lr(zgxl@#-_;Dx%wR^+V3J`|2XqSV^U5!Z%iTGZGU4aU^zF-QlxW@5|N2G7uJ272K# zE5$#z>l$FRzKojPti6m2cWJ$A?@!VP-+RIvB{{g0;?ck}I+^K;0M!-}D)q^2OrsMa z;dUM?=saVG$q`?KvsVu)g{2_?4I(=8?82ynj6Num2VFEjIE*9!# z1*G9#G9*!E-yXp0nGH%xw>IxsDMlrvit<#NdH2xzLylmvEapjuSs^R~-85HYXTq(d zVUOaK55UZ~9VkNWhw}=(SG%eoc?&~_!vvee552#A9P|nx_icNzJkYUlrCP#P$;ImG zxyz#UohbtrS)OQG?*e~AOC@IsyE1ebWX@<6fah{A$1>>Z_T|{dXni|SnGGM3!_vM= z2FSryX~gGGGdKzrOxY5JYQI-@kXo&dvd})(s4cf?q7(~ND#_D4r6m9ml-&`0Fxb4Z z^AVFb+;VC%p(2f6%U4}fMK(04aBjK;Qo(V#ZqrYm#OLBo^Dr=Wc%47AYl^>8cjHnj z=G4$zEi4+WR z*H-1xdY6HIWnDXhDfI1`{iacFIBGys`nDR0SUpQBwdU>aU`jyY!K;JLD*$`TbNXjJ 
zd9KbVw0;y3A(7oq;z|L7yFS>3d#3Y(AJ@%Qop;o&opYX3BpNB=_DiV)E$g{h*+A6k zPXWQo(i8~sfk>Y5T13N|dWhBpvWjwk2c7oxivvRHQCxGAvg0f4Zw6cVX^vu6Dq~H9 zkdD@tMAb&?!hV=2Nq~w=CsoD6Ncd0&iQW*Ddj-mC3a#6;GqM$5z`Zeu<}bRG4%&Q1 z8xzq`#1&MOHrFiw%h{26gxAoMjl_t+s8^LwyvBf8L8lqfQ1 zJ*V~Y@D`j1ni;(d-Adakc1422nUg-U8%wl>x8L}44HAxhx;!cSN(~2fJ9DE>LE+nJ z1MI}O?dr?7k!OA@7958e7+iZJ6>1?nGWvou({FQ{HjK*)cg?rwlxp>pZVXh2^7S;m zdgLVXKaaGVF2S)sWICp1ka@&~M1}jVeK1jP<&((KUx9|k+CJJ5I<6GM6}g(xg%IcU)o@2n z2l=>Rn=y%GSGW;2D~|23xt8YF`vAH+OhaXsjIVkOO2pATxesHGoOEk0W++QE=*UoA zQJEg=SDfL4ExLFuN8diAiBdf~MW#cEr!;v^`5dRncBE-i20l)6ph?{IWvP5_*l$*t z;}PAB=_bJL;$$*9JMehYG1M*PvqB3)l2%Vl(2%3L^UiQbq@hbnmecdXV$JWF+;~Uc zk_>pTS%HPsDc6HbH()!ftIp^VPxXZ#LU1BPe|=W&gcNxcu)9JTeuOZ4&>`KFbFG!) zxzB=wl2RQC-)0&G{HW(ruLhYh3)*8de>%697uq>Ac-)b%iyX1mX6LoS|%?8OO4oZ-P)(Z~VGPI6k7hY;+NZ;%U%TS_%TXoc#_B zD7)f_-y$pJ{nlUuqEuMl@Ph$F?k+*$Wt0@2CmX>61YU`+%O|t>M^n{fkKN`$Bl}(x zVb%jVtWQZn9R@8Ewew%IdGqdh&UO)ZF%>4>z1Oa4Qopk{s7ZIeh`JN@Uk-IjUX_2A z;gnFAUCHYAr%p`Do5AeBhjaG`nx-E^QZQ#Kj`h=)nd_mdvl(YiuyUhTIQj7ju%I*r z?S|yN8}Bivj*d()-686(amI@CZ32s|WZ(TgzaMYEbagOMA!FCiWPG~sR9i1344U`f zFQ=#`(8ACHNLk@_${kv|@rT>&`q`Y?`;(!JF7$C4xB)Xr5h&TkRJR%|$r17v;b|_Z z_rYRkZJ&A?jxneW7G?$;6k->NJKv$H@ZJfNVH_hrZLqh(npj=$_Upoxn<+NS(4cG) zYCGcOZp?&3)k!{IL_3TeWN^9?P~hh_uqXNzu#t_v-6ERMV_qwZ&=~NRsKngIW;^Ah zhiS}JXZhG@ZxL)sGAWN_p}&q}$2@%N`wnh`O~aw?{k>e9x_tcxI;3#(Uhb&X^~CKL z{6ZtN513r+OJAo#hUD(l8NF*5WrTx;f)t!!)N)GQRfOIl4;Cdk$eW8M_qPth{1dOi8WLBB{M&Q5@m_5!K)uN}}-L?Y3H+y2AUo1+h-1j4!YX?KSxh?lbgL(dYy2t4!AsyA>p5&jH>lpB7;;m+gTqZ0 z1H`jj@%vgAE?P^`H^$MfMN6sBq9wHiuqR;{DcQgf(x(%YLKkC$esLVS3EjQwQ_BCA zg73BW_B1mUVmtXR?L21QYlZTAC2xw+*A2s6id;X$*7==~iQHJ^@wh(UmU#k0L-Usa zPLCA(bV>FbkG|5r@r| z40M=Ewb;{ThMUQGg-N4-Kw-4Wbc3xjm+-|kH*g$Jif*j@6*cr;Do@rCixVnh?!Zx#peSVMDJL?xENc+5M>S~cAV!p4xRrnESllI+~RsUoS zz*0rM0j6N|_+K zgjx`lio>*HL*b`zUy<*~m`gBkzInR{Z8$)A$^)lToIXpXALMM1N~m-hJY>A;)ngVc zYcIejV8vnMmcG*f?jPqACU#s(XC)Xdc7F|gGoQ43vt zI7Uh7>D^O^!{5E}iY(*M1K~I`annU7`QhYy#S`&Z9W5J|3E9>X++^l 
z8|p}uY|*jK+XQOtj2!}=oFAR9#gEZOg5g}8hp#z3A0vAVXX6H(vVw~#s=uc@hHP~X zV6>08^N>;G;Owq{Ev`FdF`jT&Au&)NUNC5)ye4v)TU3s>47C?f4bNXtvP`#w!o+MZ zx?pFYugDZqsmbo4rMH{^(Bs5=-UpSwPlcpzwWm@gkKvBUkIPXl-@2rQQ=LCGAuk}4 zZM(^!z~T}eCN5-FjsD)N3Cgx{Mtgc%92c7}BJywnGYNNSe#8o$23Wb*WR8 zd1F2iC9}8Sk(L{AoH|}~M=PJynO2L|{63+7kd(GTM+ufLYj6Ys*2jSHp8pC1eGE6d zWE^fY8`%E(D`K)X1F}nvD_cE0Jrh>av6Mvoe0P;GeRIL^IHGyvSO<)SssD^xhneoS zNKi-5lQASulM%qHZqRi6)1!or2u)UJ_IE<8Joz@FrfL?_A8k)@AN_1ybT7p z`a$(FpbH&(D{50?H6`ymz5O%O@ui<`$%KmAz6EXU8qE?(gSM{ZXoXJ?-$hrt^wWo{ z-PlZ5Cb4=vN;=&GEwWS`XHU#vHuj>lcBLC9g2Ft$n?a)X&s1V9zSft0W_v?}vnr95 z#ypR~6CG@GUh1$#n~^9`M4BjGzwoSAPd8UPov8fc=AT?B-<%E$evh!e9M?2Uxvh)s zSYq`?5s}=t+SX8Bag3$B=zpek)rxAr;9kcno%E4B4eIiok*$098bC}Lmep^p1;__- zze|+SO;G8YY@`dNxeciK#!Bx^P+I+JkU8XQl*D*INrH5}jY57ZY|J=K$0pEqf-f;v z>!+s};<}vG$C9`F2bo9}t!{8mB4kIEu;=@9+Ar$$FvrNF(jio~hlvKZO^=6EhmC!z zccL}OU(lzudWYv}VX2eXa%FkSL*(#mJy3_`(GVV3B4ZwNk9`R-0ws@}fgV@cG~XP8 zm1k96F}aB8tfJT#+S+{@nRZ`QEE}H7_ZW(dA2B(|>)f*Qb}V8`jIN4kKH9r}J;r8G zTjue)s!CMoNBiujQ|1VOFE5d(XNG{S<1aBLw41=)A(U(HMND{9}!vAMnL4Q zw&nKT+A}#8Dx+lttk`wNpLq;l;|016=m&mMc?MZry~St3pl^D8BpKIk7R@L!J6Vq2 z&s2H^F}^q`W3fiD8xYkAPi<+|K#*Lx^2IO_KMr}YInw#4(tOzhw!hhgPs=k5hsIrD z%=hf*rReK<-R6{GU6+#U`pEK0V3E4i>j2PD1Jh)?HqDznKLbP695jl8vNh3Xcj;C8 z_Npt@D2bO)Cul1O-wVi@MGv^t+pT(YO$eu`_HR90YYQhWF1^y`Ig zdFnmlCW^&ZvuGVn3jEJvo8zphu^GPA8{5j<#?$(;iE2o{4@{Bc|S^PL%cC**G?#^tAMLc%2WJ^U|-C`4J z1T^?IU6eI0r+i&(B-W)wQf)6H{`h1UunmEHO%$Z_EwSIkC8j@yr+eQjn{aR~>Dzy? 
z-0RzyYpHyOE-naIr~*PbIR6+8=AWFq9j3v1{UX{T z${H&MG`qBH5o{xPz=cDv_?mVyr9(!*Yp4T(VvMqJ+~Br%n)E&dJsDNsR@lj&3*Do- zjPhtn2KKb;C+QXM+Z|M-rS+^P^pUql`9B;dV9;t}5!$^(K=B z6be<2!h`?$i4P&Jd0H%n!u%)Rr>fUwJFQW~{&DiX1yCOk+*I!e4GF`3ipLuCk)cKn zcO5t+GR2v{+#TIps76Z(A*D`1tm@*oVY7yHT;qgpP4x_avLJEieK2JGih0e{)LEr+ zf6qfR_xbhf;~M9sF)wrvi4bQpX1lDE^f_d1=3wz-ks`;QX?+A z1iwnHp^0FRt}aISov1$zbBCEJ>)4)t%;27Oi)U zX|PWLbesG>M^M|(+y^#84E10>BTNS{2Prqno zJ^OwvC}W`vW4E%M>@b7ot^zOHcr$$3M;PsEv{je{h^5MDW!ZvYxYG(m>y0cmW-|UY z<=UD03Q0+T2;sfJc~&i()6_UJa~!KCmY}8gT4(3jxW2NOF=zV7kBHlpIWFuZe!OH& zW-F&fy*m5j8HGA2f))(+Rbu{Pl@MulNg&R3pfphnZy~6LF$BK*#87lKsV=LpcDyAx zKzTE=>jRu?6%?-przu`yF^pJ1pX`dVtu3Sa=Vh7I3R}VGz&y0;1$5tHAL~@mm@W8m z9hn@>V`$m1x-aSF{s!~yo55B${9(;X&(D@|+cC^@Y3-1qse(l^u^^LF6km~T|5vFX z^_3x`S-NV0Q{O3jDC}e=`?nhmso_tT7wEK+c0BWf7p@TtP~uuU$Ich{LSllNKJ*zy z*ZS~7t(k$O0p4BfQ>cU0U$+8q*t$v^86VKOI(LlTx=_2JZ+x<+-$#AJL5-e-d*528 z_1s`gdXUvwLs3!Toj5M1_2kfM#r1_3@6xD&^@AZ5fe3a|#8Lte`mQ2xVY6DfjiP#F zz2Lr~8BfE78-K5;NAReJ!caED+AUvbL!!))>H?L^f~oiU?47jjS}c^eSt771nN_}W}S z!LThlYc2|1Z^%MA{KT0lc&#U}S`#oogj6RbP|54ojHZ}YgBBiQj}yr`jb>HqwHGDu znkHH=346YW)(E?N8+p~xG8u9NEo+~}d-Q;=*J0Qn@MwP5UPSJM4yQ|9!(ew>DS zf)AbS#{+tyVN|_F17~qoZi9^F)2WySI)fC?AyelY%)}zEh2e5#`Bn3QRJ-?reIKKG zvGmjjId7wqkF~Yb;s)q}ohwbs78`B;KZ&J1ObZV%Z@+vbD-Ka|l2ByQd%2!3q3t-nNn0U6_n1v@NIz5gk(|dA`CdN-JSWH5k-N6x z4CD(j_~;AUd|IFU1=f@WP(n29vJ)i*T}e$!qk~p9f$qU(F`(75J0He{Ee)MzCA8;! z9{**CjSdO{#kkh^;z*|bjts9SS zVw?n}Eg!{^O6*r5E$ShaFC%0zj6bv3zj6w2|Zjc1JeG) zKw21(B>s$o#+J8JG<~;fnX%j_+Efx7vkK^$yHRoWc!kS-OU`ff=JxDuRtImsp4Ouz zC(7Mz`RtOph+;*fgM<|>-7eEQqsg&mv!tyX7iIsk#))EJiA|S5@miTgkl3EtTjxM)RjZ@<#le6N#~Mg6Iao@7(s-LHC4di9;TA&cba zWTql)JmkA~n$A#x?X;0IP%wIkwsmTeZM7#+O!?g3P}-&Yy$~HpzI0>cg#C1QvLoJm z;L9(9d$bxN!APmS(xNOYljMBreq>0}B7T40#;)ViT;q>1+MeC>^i5RhJk$9q{#%@- z5|__M!}oFpIA=eSkxw_j@EM-u*G#WQ92Z&}Z!TdePPfHk97C&n#hklcd{54xd#Oc| z`%FHaMykyNj8=zIxR6KPsa{kRdrKfS;U+(XEP~ruBVi$UTPk$>VhLmCno<8nVvsJX z?gO*87N+D2Ks5g1(Bz{*m$$KUDd@Qed*mQGe2Gi-V*WnmU@WULe-j^R&_2&J?KYy? 
z*6BUUJkDXk*vyTjk1y(*;6q?#ciT7bbEIjJZmFEoyiBW1Lef#lN|1<~pPjJlx0fwL zx3oZBd_ za)c0N)hmh@KdBBhlWIXA+C^5Kq>(~{9)$tBMRx25(r>LYRwSNKM-wSGV?Sjj<`*g+ zJxdqyaG@`(T>IrlGqCeYnRq7lF}+{p%BC3Z1!Au;#oFo2Fxtf)0DF2jh22mg|RuR?n{+94B{H$K3pd1ST@gmLCx4%uv3Ti)yAOS zNo+-kX_f9QZ>QB;*sAQj^r1x2H^|1(*%N34VrLe{&hNJCAad{US`=)1aX zgiHlp61m>6%u^{6fp@+bDZ7D=?}5&CWa(R|hB^L#5@NtMfY~SEbfp+-$kNrNJ3zt- zVDWzb+mvJUnRH&w*eO23?h>?$d{IbF?K)gE=lUorrY4Ksu5Hhr-)wDZFkTpJ=WT93 zxrPn_zG&L_ARGbQOooT8S!u;p8lZg&WKytdxjxMg+@v+AAYFS3 zI8Q2=F>vp`3gR5&47}e7pD|1^weL9tcHNJ16t=N>Rz5vZrwq0j<_Zghj3Rps4n^n? zlpO-E=BF^ZnCb%F81>ABL0gtV%Qhh~$WwIWnMIRDcvrp;feX|f&CJ&L+0tXSTQl(1 zG*8m~7I7`xmQFqGplAuVL!*D3LM8chhy`hlb7^2vY*Dufbq~ILlKDpfblMo4!Tp#p zJpITIfzWKJ5Ce?wuPzB`T^^n3=K9T{ZGZN22hb|BEZSPvnZskK(w`A$k_8E0H{OSG zoId#%Zm+B(8QR)PEc zjR=WSK1~oT-WYYUVn7^o`68sR!n+6u@8X}3v$y4MB0Tf$JXsZ zmHFXCT0x)tC7dQ`)Zk_=tB&*ITZ_{>XYq>FsUvyd8TuQaV&ousb`H8Qt|!ildk&4y zVwS>kgi{XrM^0z+ecv=EPhC{H?xTB*C-j5|UB8=c`x92JJVMitRP(lrz*NuYA|9OB5^(30X@k$d}9!?u_)QGCZBd8=Zt1sP>Wg1jc~UugUM{ zj2)TXz=#PdD`wA@6s4z<2)dwV=&LqN0iy|?OJ_pg9xvuhzl5LKUvWP3B-2>UjBQ`j z-z#7w>6)0APDc4wn};G9U8)aimxpm2d;5wucnTnXdSdb^-og!vQb5ipd~R$F*Ry z*im2oS2@U;=`wVRjRffi-(X0>T}hvUq3DkHv-K5)f8fI$vE>69Nj_^f3KFL`@ZTOm8h}D|rNpEpUlx-DXeX1a5iYR4R=cmg z(B+h4enf}uWk<|;Bs?J~@}S~PZqNvL;vN?2ROp0sZ4LJKYfTCGRO8V3>$?Q*-W6e~ zF#bH`0bKaY9+Yl3+S^@2>8je9BaQ~CG0!q%T0Rg zqE#o4_}JxES6wh}H|gd54bC{nc-BKrlOC=# zf|%c<*K?jd#ss{d=|(yp@i)aRbY6&U1 z;~c;R;3gQ^!IW9!`)2;2X(U5@3flgxS-K}1?(uRdE4$LjD#F?yhin;PZ3mEAfu%Eg zUZfdKO(}7FeF&1WH`rp%uzW<4%#e#^`FT>O@mBWffTc0sxF)-?d!DHD+VuK3**S-U z@p1t5(6H^%Sa}hD8{np-S^PY*m#>XrU&?H1NaQ7Pf6C_NMfg5NHrL)fMUssq(U?lJ z{B-jmf9bskcluG}_2xmpZSuE=Y!C6X47qVd!n@>D3ApR~VQr6pxe-Gv{K+N6C?2rAj!7#E@9?(TmlvliZ^cFN3r zf7GjMbGG}jxD+i5sxkpJDJ;;MLsc18{KXtQ%D8FXByxh|tE=k<=5fKJhG@e#l^#Q# za|eFo=Ms00gmgz(FG7LAvq%g|)BGA`GcHupHZ$>i(}7;>H^+TD#v2`- z+O!z#o>A~swf+@tG`y7ky@o!{LE=l9k%w)VlZPh4HAFXuYw!l?eJ4W_5iO#0e)ZRFm%z3xu=Jqi!J-2 zkSwbJ=i&*DoLZ_i>7J>jdE4aX%)rN=V3JR(N~}&9Ojr!1DMc 
zfIc{uv8pa=p-{x)ZF)g8C0lcviC6_ME4RIRi=|J)VRY)B81AMzMPH=7Gf&Z!Mi3OG zU*YH4jKk0<^$59+*8QE>q$#rw2*_L+!V{#>$IWC{l&4i z#e27^{MshG6I(wmlxBTUpBDKYNh964L8G155dD!&o|1du=R!}VIwbt|HT3Ad5bY>V z(onV~i0;&nNf^i@6Sr4;u{-VKo}Hsttz=ghvPWKkF>)yd#CL%tv6OvPv*1QOITBdy~Xjnkep zpJjXPUMj7(=Ulvb$CdM?zsPM7S>s4B^~Eb|#~eo@W7e(kS&LNvqkq1BxH+DR(LHqJA90*dP}U_sCiRwY;r*-*X}t+b!I0z@ z!bgf8D{;5oeGXAbrBWxv_g~ZzR!Ke!J!k3GX>_lh4FTrhO94#2DT;2RAxrU<`)~J- zH`Ziyt=L<+D7R<*J_f z3O(K`b&*S=Og|}oyw7(;@??`P+>FNKpw-7^tNnOdqf&A6@xh2IYuhI$W@W}19Oey# z?Qj=d=me-&Qt&Wa{xA=mg<gb<0THT7tQc zKyKU;IGLUw^!%WCNK|^>z24d;Mo0|!I$s*-A1@pw@1N!0zG_FvpR$K>tqh7&d=qXs z=6Pjc#{ZM9*by~R!bMzZCC;5&^s|kBYD0y@3PjBFqY}3Ny0D>`GAz1xK*CwGP@LD` z1yWj^&a!=5>uI!fGw?QZ;24o(``SzKZZ@7~^Ma;-fs^sO^iL>@9wCfemck*1b`GdX z`NKTBd3~$bCLcV3_pVisL2zOL`+BydRF1|BGW8N*}gTpGM~_N zwceynou?sSI_)%yF^d##2Th~qJQ3X|C6Z{K%0fxZK8nZn^b0Bmk`oH6PyCTqLMxYp zGI8ekpqSLxB?U-1Z$eFF(9Z`zOMYK=blk_RQ>TA4 z*xOxdpkviNB+6bXi$i|cJpUBb>?62L-z&*rC*+~G$SCyW0h@TA+41DuH&^F0tX5(2 zLT?@@JKVJL74;-WNwnz(FMFq$ruhCbca{K>e*M8E;|>|i$$#G_Bhk_2$`4EPan)gqik4I0JBW?PUl~i zTYuz*$V9Vg#SRy~x$9bf(u&tEQ&9 zY}}QPKU=;9H3(T093%JiY1Y;^R;W(XTLeM}^&b$*7iX1i^ISA-m@aoZ5731%Y5-s3jG*5&?rM(51ycKr0V^dn~IAOOY zFZVjpO3}Ez)Wp&?gF3Y(+~kVhRvmf|!k)$oHj-z0wxX&Dj#h_v&Q8a3@RJM|vrgh& zT2`KAo(ucQW)x%M-9;a9ee$>}>TDhm->CoCWyjDLQ&z2-xgXxRai7;xdNj~b&7o-_ z_$L2A;OicYYCu-#+WrYA>_9B%x@4Jvz7ll$2mwoIvKbyEm$yb*FYe?e2BTkoR5e3M)K6k&p>;soImI|*!|2HeSi z{X}Z|m>THpV7gX+lAm+nx-sv9s`?WX0>mza=I_v><;+f5;hVI+&W^Hk*q+L89tac4 znnGt4MqvyHrin+k=SYotCw{S$vL8Q`$dVphV4C7)AxkUSZCU(^Oot|$Uok)!yE*`y z!qN9-eNAuP8Oyn2bco)F(}d#-`2VO%Sj@4<#8#g1gh4ga6|u@jgGWC!mIP`sw|>65 zAiDJ_-*e4!XJGn7{#jU11^y;`f+@iIJS{l+`F7vpwNzxEvk$;3wwQQ01p!?It2anl zjtGB)gPN!6uEJACdNUD|%iT(d-8^2rCkk2g5Y#1*ADn5=XrjI|ZZj1moX!s>U}su>U1WeWv$a3d!j zuDl2~tt|2<)!`!?5^<|B6K^-40?Fj))jIOg*{Lxf9Lf{foR{m>yp_K6!RN)v^wk4{ zR=~G3G3}kFJ!bI$XQI|k-|%^lzP?Y9m_mz)7DKJSEuMNjz4I*wu=LzH=oLTH`V+n7 zh=Kz$c_N9M#&yIb-#LLO`oy~%qOl+(J@Rb4aO`)Q*Xv>Wdlp@^oW8AI%&o5IvkwI! 
zSr97lcs{akmEO;pLA^4}6o_ZAzT{o!nj{R+n5{mU-NhPBN*MgGtw_2WtrS8&>TN_N|>Wmkzr(pL*nZn=}@E9Zhj%b6aV13k>7dukw90F*-8 zSce|%cl=-%X#4Wi_;k;K1(<^a;$L7avreG5p@hj@))3x;Tw6xCD&tOjY{q4&L!V}7 zf|!!tXGnE^U^!!vCSV(py$j?{Bj`d5A}pE|=Y-_u!G`6q-ZbSz%c5@+e=0^%v9;BH z*XBUpcQF#(OJ9bpYL`fHx}U~)BWe9g;R?qU@q)|Sck-%1GwF-O_D)HVOOfQR(Z!Fz zu&&+Ji=hME(r>KB0YuTGwTPD9t^p!jbK*1}UIXU}>0N|ck;?-&$87`iHtlt!kH`ik zF-2QnD$j7yzS%?JKM$DmQE9Vvv0C8I>4n70bg;`!1~qtAI-5L(8jjvZhlg_b3whImiZL&s`XWzGQFe_- zzSJaxa@2aaVdydKCh{yIsb)-%$%QZ2w96QZkIvWDYRlRk_{?$S@$@zHd_-KMlH!RV z{fj7v4<+u`{F|g9{L}LPEhFX!!3Y^5KE^JD${Y3Cn^>4 zpm>o$gPnPY9ENfJ=r?Aw#*#abc_k1~HHurOjTBzM(b(pv9cE0Bva=#ZY}NmkQxa-? z@Z~gsHk6`y*kYIPW1K){ZQyn8bF^A|0N&@u_Oqq81dDGatt__H@8B!wy>)PqNrt{q zW|oCSVMV*UZHj*wew35`Jcb9nX?;mPPQ57kz_#6Ktm7EWuy0Kv=)2=;c=S`T>t%Rb zyg(oX|1s2HoZg8Ax>)}6+!Gw-`$0UOJK_7CNuO3MHX@OI4tuB|$KXzf&1w^t<*1*PqC#P@3X>Ua|*nOw9$x$eD9eaNa%#$ zq9K>X?KO4B1_44PhVv={mL$%^&-uLYVX)Pb>M67w$Bc(2yi3d_4)i-8Ye>yk?H%pP*`O?U&4mRmc)@u{lpnLQK`t> z@=ur_>hAK+=#*Jm+f+~Syiy6Gu|g@$W-i(EmMrq3v*TAHDVKy~U2u3ZW~Kt?h4}`z zQxg-XvFuvee4#t9sMd=cqrqS&98w7DI%zKPyu#h^dEt8AMxq zm--=VK@x^J7kVgqgy5uby|X$fRnAzcyhE2qHnRCUhAq<&K{qE-GQEs2U>ny7*4>=r zMns0@V2iBxxl5!2_J>vXFi8Zff5gD4N8b3I2)|cvNO?761_In&HT>iAjUF`u5ffQ# z7Yx%I_Tf)I!i~+OpI^%uCWz$Bi@A~5+Fj)a7gvQDgVfe&Vh(HX#OIu{_J}^NFWPy_ z!d99Tc)X=LJ#egF?CX;hE@EE-?Z%_XOku*=dp&)4roD*G;qpkYI4i>)QYb_YN+qP}nwr$(C*|!Hh>h~3UW#*2^ zP{A47?uHw~E0Bg&g6OUuS4AoQ;FYa|BvA^*h_IWizzfJ#_rbw1GaV;XNl^f`1Zfc? zwuy^tEup_30wxUP#n6(JAlekpvlUrN5+gB(k{J3WVA*HlP=KU${7=NYaYF#(8?=s1 z7;BqdUm3AN5>Rgxb*|72NErtO1)$oIerZ!H==6CZe6fdeEjio8bMa)>f(5kMQ4=@v z+mhN-nL#+4-?AvkL#5#Qp`rraZR;>rZ9VU}KpLY5TmZ{HV;j=piVpVAm1zb|2)2>_ z6AQ_r9bz4}t`+_>_RB*-U|Y!oBCtQiXeqHTS4x)yX}im|u4DYw#IM1BI<$rD!2wk= zuIhOa=I(G^_;__Iz|& zYTUvq&<&|A*^)dm{CoaK>45UZmmf~#-0AF}KWT%90tUYVki$^fs#LEY_59cO2m21! 
zaY+*{c4+oT4&W1*ph{40jG_D*4D6;migW2P7U}(jmis+k3SJ-MdjmhclVRhf-X7}o zklZq~M!oG^Eujf}TV^h7!V}R*uj+(INrNb zj?lijHi|PK#V=8PUE^DJQH}vCESZpQ2f+2AwdOZz=?q}%+9h%1zwRntIp-sM<18L| zyya*{mFy@34+u47Z@Z}`O%DTLvst0|$p{&AzpjT(A7VuO?0xxS8}v<-^0b(Mg3<)V z^V}Uwq5&xGf^BN(wN7cnDozO6-0zyfo_Ud=0kBREq!Vx;e+>JHDS~)IsYc?YoetQC zb!AnBOE+7{SICQ{xQZ6+Y^^&QP)sdc$&^+91;TUJoLm? zUYHu}89l#l0?Hh1yMdd%SvM~a^GV@qR#bYtGk-@Vz6gM6P7I4)aRyCj{T9sD`a-nX z8DcIJoFJedcr6kscFtYNAEShGtZ8L_fD3Ork@Rkc)W{wW{1yBa4^wL=F?D z2`n%Ne`pQF3xB^T67QCT5c=z+MDWand3s^vw~+DrosXha9%w{^Bq`#Ok#QgJaNjJm zr`C9+@GDkc2zp z12B=De2J>R{lE0+rz2)jK}6dtVsyn-K=(`1(A1;ViES2yoO2tntkO26%9R2nD{gDf zh3Lx>o@5B<$SZ;-tug1Qf%l(c6z!<)-*ipc>Dd-vtJYR_E_qubbe{l}#CQis$c$|7 z9s7>@-i1B3n@I3@Rds>=989WHZ1ynphH49rBcoO1) zB6M4J2ky|FS@~K$@d-Ctzj~xb+2ex;$<3!} z_nFP;jJj@9`to?5`na+P0*W~RKEESgOw9%zoX)WtOiZhh)Ad_Wm~sQFDn`4##q9KF zM%}LivalM09#;&PFY|NhWC<4#EA$_YK_VR(Inr&q$v`VGc1LZvO4HZV4<|&OU)>#P?>blfuXbt)%Q(cte;A7Q;EAN8n30F=ZsIRjZXxEOf1db87<^!`hx|No-k_^)}MKfoQVvmJlcHPGg?7wem! z;>C|vE@LFpR7P@l&&&CM*Cu42*FaOhgn>7T3H^|afDnDIm*;SF>kGP3tAVFT(0aLC zNPj!ab_!Ie1ck+TFN@!%)EQpg+kl-y;nwf;N}j=aFWauF>}dzyRD^VavvCU_&OGh( zE$-Dw4}A(4hL14_1VAhlK!b+|v^x&YqDovCQYKdKB6&=0Jb#b$0o^q&CPja}ZC z=JtsXhd{>v@fH$XX_4QJpu$%s z0ZxNBZ&}>PxTgqsNrA1^nr3y`_{Ndu-VAPsJ9 zPj6Wg`icMCurNTJ7qyEC#fLTFt&R+9B$)%~&FUCThXDUcv?zH2`LaH2v-lGAq~-p<9G*QYD%=LSv|>$B_b#s)bwz% z?v{FNke8u+Y_3J2v`$AdYIQVMbik||*cIg(#;S}sb;5RZa@9kXLMlgpQd)bY)F}G0(pjo^N5Yqp|v{2ux&g49gGW`3-L}A)t_J_+(2s76W1P31FrB2 zs1CO?Bnc^$Q!s5Bqp*}P&drzZy5d)2?-u;pY8LMkP<)H58wRvmHFCq&?M%|D4gin0n7XBi^h)3yFR+vm}Ido#7C>qjvH6Ce~~noJYu?b zt<}pv1@ai7+Izs}&0cr~z7A?{8aYD^Q3wy~cRAVgGQ-^?c^a{K8YkfP#obB-KBNyk+1VZQ5brakZ1F338(APMVE2$hNVt)Ub@y- z>whk*-2NdK)`Z$vQ0^pS4=dPiV&!ij^E+Pu6X=ypj$(<*4jh3nkdakN_e{)Ri$>II%`#3xa$x8S%E~&96PacNBLs}PPGdFAVsz$vUrmek zu&9`&kxt2O*>3jC^`G*qiLvyOumwve20QH-v|nThbRrE@VbFWY+=b_D9mn!vNQk zSB4yDxsvu-)zBgTJ98|#e)c!>fGCVUq|C4!Ri2Z(^bD&0KG1@5Eg9EAw=hB!Jc|BbOL$>83 zgbKNEfLKIfy0qcw-!2(6b{{bl#|k0}u#3pn`GVnKote25acf}Ua>kHtN(cB~hKzPv 
z*r7L(f*$GZF0+UuU=5!cYTynkM&3VSU4T$J`?U)ehP&J6Nfd!m#tY8HeJmUG;bmjj z0KN3_2CyHp4-J{PDoJ({y5e@kOh%1;JCSjIr-g5-ZP15edxPZfkUx+WsSC@gk8)314y7+wsAGmMne-I?6HsWyCE zLTD^)G2Qk>kS+n(<@prh4t%jd^5Df1`jJM6SXGPKF@IQ!DUgaKv54nf>irWNIAl;Bsl?o19(o zkD44?C7to;K@Ao^;n}he!9_-q?`54#@Rj2=TZ_Zww!VcOw^_osR4|H@&*#<=$$T5TwZ7msy2Po}T? zVEys}c+ssN@xqzWntnYAB9O7oezDUuYsfz!J9zR&iL1vUtjiqC&mL9FOytucUzK()nAAX?f@W!ECkecSt>)BDQhC-g1sVOG_8%^iC2CY43LGGm4Z`uc%i} z9sW1>-*b<9SWi@i{BCsTbV@ynr!#p{gBAv$^47 zr#D82C84)hYf5HLp|=w36pmTuZ~kWWG#NcC)vtxI=XyMtw`liHMjJ}pJ7o}iLuJ%Y z>sEvpCQJK`>L|4ofW7kORG*Q6)4)#hef($JZEc`d@WxYZh*TJ)T!KV~pu;14iB_6KPZC5JqGD~0e#OE+ z!p`4uoF#YM^3mdS)q$+tzekTHK$89|Z2PaLVrZP6e?O%1A)9EDD54<>~1u(%9b zR-$h^B0^@&fagi{}Z7o#W!)tb1lh1L?(+KzxX7plN3?M01FrSu1>WaW5k7 z)6diET}{*B3gP;JB#|%~4V^quHxq#zvJ#Y#Lo*zMsqPE1_%13cRJaBNFcWHC_X}#l z+)c}<5by-p8D z8;qJPwvQFX=I-|1h8m%4%xh+eq+Kg`K|4h2@94nlYU)azKqF6M>QSrb4$TZbpgzKSauR@li$; zw)L3D45=tymdg*piJ7z#^P@=)BCK{)QM)iaE1-bj?jX%82duuBwJnI%&1LKGnRt!uoI4WP%MgWJajA z`jd!W0pRMjB)odFDII@4C~lnW#7({>D|$kaJdX>gg=DaYR(Fxwu53c)jD}^7JT?8) z$2Mw5%3>QXuw6U;gI{qWJO&Cp)1%d;&3bmCwhI`GfiGFEYzkUWzenKicn#k22>5$i z%t|@=4`|LS?bv%V=z&Sm*F~5hYNr5zOQ0C5x_OVlj1NYke9}3z6~AA=?8qM79u1%3 zl7!fPstjCg#489lEER``on~Jn%%SXtt?wBX=SsDS6lJgGJEP>Mfq9!OkwHPdGcH>U zt?5f8QSp6#R|jl}i)~kG3lK`crEIWrcw4If7AP*8Trswbn~0CK#8&6USum3Y6V%l8 z2qKD@Ztig!T?)xIZ+%js2agIX$!4$Y6MD~ZT&z6Tdh4_})Wy;uaV#vC zh6ZvmUg&@G0!^WBPc%TqyE0AsmIZ!;F9HkQ-)UqE0#Gj|0I`c=k~n1`zvuznKcL9z z>H2v9+OafXSW&S9wm?d_zqdEKlC0YPVn<4@b4jw=_fA5B)m@Kd#sf@dMQGK^6jHQQ6;KL zlq`6D$O5ky=P+Jp2^2%QN~y<|?;_zCHfW5M_K)C0wvNNUxy&ZaiKp&-3B)!p6K^=% z4QLs_HyYtC3Ny*5ulp;kmXH{RcOB}j!40B~c(3;@0SneXQYG0(L($|rktzILJ7~b7?4sk}Cgg|&xKiRt zpuve4pz4q7p~0c}1~U!kAlW+6fbdBx9M~;Ec51lSP~vZ^W-jmQ05IY5ljLyGoF~b- zxqP$8LasH^E&NLOZWqK;qsOr|FQK;Q8-v->R@l!1F(z+|>vMmo2T&7B3Ffev=iY@( z8ZJluFGIL*9P=os@hzUwzrA5ZHKU%i-VT8iHU4hx0-iPVZ z;z`*Pu-nMPEb-HtEk(%2N07}<5k;UB-W%7{*9pa_ycaFCMsoRngb(V>PIFSQtJZwZ zodyr)vSr!kLc@}oDFA|4>b;-X)YN(})Lma%OH*mFt}{({7UamjW^Rv}SeRU#&8gbk 
z$k$>_f2kXlATrj$iGUp}1%Mb8@oz4{W0d)IZa?f)afWyS@%#yOvM2t>W=+9w2yI#H zB~3^ttI5g2KwDI&oBJw!>bwMDBBjP?@Pz(_Oc%$BFG2A4oYqG?lCH&N5IQC$3Yo26 zC96D2s=Z*Hz?8F&UPEU#>z7XC5roLz7FuXOqLv|$Q`ErqhE5$PbA8jDZ8_b0S||4` zLekK9+f8vcxaL}#8s-%@u(mnK#nN$c#h3V@ruU*eHepA>PwA2sT=NFKffj6qV@4sfi=_Fai2@88df%BDoZ{OJtfs!#bf}5W`&IAHoL2IkEnV)2J<6 z@xJK()N$CTtnnSma!ns1`{hmZojk3y=1>=VI7ACqa%KIO-yXW>^w)8h-ey|p_yH8v zk7BsAUuOZcX?tKn@Wbgn^-eW3CD&}7t(($b!z*&hpO1lJru5uqP%g`1o)|~_L`(9z zD!LO;3wP(3JG55#=<|5mL~6!<#I9i z;xv0gr{JU4z{So3`a4z+q}=%wbA23cri^u5uC6!U5vMr+5=(i-3ZOWRnfvLJVUt-uSc92?Sfm|gOYh-0x(0f!FfnsAAybB0{`JKkS?GD z$R@&H`%4{7LsKXM^Mkdu5VBa!7D;S56n>S7k$LrD9Be5Bpw|!ppai9eG=Rr(tP!P+ zWNqmZn0m!RAkMSu+u3!+;ALLDJT^OG!MA($x-i^u1A8j?`pG1YRk<#D{$eGWrJf{1 zR(TCFU3M^{{wjba@BOn%?Oi*Nudfn>u@*YyV&*kLi=iK3|1shyP{;p>3=_i$_3%Pf{R zv7Zg6eYO$8L_E2N>yG6s1HQ#0hgM4pfkP-zv(>S4w-A>=wLl!Dq_kD}ZJ%Ybb4Jpj zD!N0zTSB~UG_`z-CZc^wT5?fVhooMhyJ9BISvfpe&LYh83g>MZ*YpUBI`WM{)OW*& zDj0vLpC94BLrR{X^AyE53h!PgpW`JQ30q8~D`K)pa$k{0^LUFO# zq42Y_oCmDenkK>ak+t+wAf<}ue2Y+`wBF$uKH!ENu(bKoAby`ddsv`mYM03;G81?j zfkaYbAoYBQyZFPnbN3=5n#NVjKE`pkuDzwKVl_JUXZ|%W3$tzDOsUmd8?p(vqw((!z;da?Sgh_G?rtTMix_S+~UG9+}XGT(ohEJV;cP*r2Q$%3=Qal z^tVuCsm2a5cnt_ZF`|=W8a~31A1!U_v3aTP!&ZnK0wmZh!@~(k@{GPcegeRwNBlPJ z5`jvWtWNs*f(^>7T#uxde{k8lXvLpZKIZ7uAei;EayY?I7{23dCl zv9hM;&aYPpYI3f@DVvec$(rsY4Ii+_lr98xg#jXtYgh}sq8x@2eHYC^`l(Dr%cr`N zPz?Ff58zhTazD{y*wQy&Q7oIY@Ba3~*ISBU6U~3I#t7qG?@j4(9h^<_U z?L$)@kiut0Vf4PI$d6_dTsR!tX;{k5;-f%YgBK4|5?($okr{zL*~kabjU>0hkh+!6B(Uy?F^G={Hxnv#1A$Jn_%{olm%fG=9Sz$C zxLA$u!t#sx9d>qT$Ek%NJlHk&ceex{jW6;j#)k+3erU6K;)>s z>=b*Nwn&@wf~Ztj;Lk#$ycS)Fg6r|3zTDtnNsr{vFYwrgQ!MaC%WfhO%TQGm?Q}fS zGH~UGj9zKDnU=~jd@;98nlo=leTsKpGr(NmFF!yk1zxdOBUFgms{R zEyccA3=jIMx47z-JKMraAj+8lWV081oXm&$K!5>z5S8k1+<3fc$DdIU+Ot? 
zRpKPu=)7%=tO5YM{*}etxjhG@&v6-Kx-bQ$<4zS476>el_k_Fsv(Rg8U4Mh#rAbY` zJr7@m>d)}(zeP`-HzziYw`A%yJ)WRsx8}PAjLACFKzDS@0)~1OE+&c9PB6JO28T<$MJdiOh6N5~E>$N4tiut3yzLfv@m@BEnX<+8)=P-j+F8bgjUBp(<{j23!Ss| zSogC?vP#j7;6)3nXkLXaeeY=R$u)bc2!1wSwg6;s+c%kZ<0ow$s19@_t7o8QUb!d(iNyAiEg3EkAR2C>)*b9!p{(UIB8e))!uS~-_CptB9 zz?k08t8k@VYNfTj01!GPH`bzo2coeZL5_C0liFETpAzSU4vfk&S)iQ&^YF*9z1h9N znLwdYgEDI>f9Dvd@;jEl$E9zl6AGWV|-K!U~|hi z=T-fMESKm|+A>4E_?tgT;>uCw^2WdA3);BPgeD}JyT9%{Lt(GL942ReQ*3h(Uk5TG z_bo4Jk$fXs_8AkeHwO@~@tQ32zATNy17;dhNN9r_W14CNo|SvZV$8@m?d4Op#eQ7) zQ7sKTUh%~ry2_9;_BGa-&ze{gAl|w{Qkiedg}^jBkxS?v@;yIyW$%!Y@Sp(=)(X|! zQ(R`O@i8Lu{kgFbXX7jmU?DYbGYUphby+y?eeWN=W_g&X4wsGc0g)#|XXp?;H9jUH z{HP*;;ZkG5OXhPb{c1!N1Bzg&`?|7IxqiO^rL_t@%Dc0!pEv7O6d%)WwBqsi^6E{D z7zm@GF7o|7zisu5DL>&ds{wD=k3#SSVS)d-aKodMqC+DBzsGy6C&2jvTZ&iJJZSetlO?c$zc5qyGpwr5b) zl;&}CuHkL63o$c*(ibu75!M3mnP2q=0u3S9{`gAE@P|YTgzP*sFfEbDr>LK$TMEVt zI22?msS~LGXY(c}IY(yvLF!)h4+3P@RZIqMKlsRKye|lMZkI7}RQ`Ky;oYX5n;*~i zYJM4XrwvzdURp-r_sgU%U<6k%Z@xG^=|w|!M+4ZElt08eCE!-y^+}xGVcRrm*U|87 z{}+fn^_0GuH8d&~?62lKu;e}}5_yXo1O6sdr~6sG415J!p>rozHTZHevC2B0ZJfsP5$T~!;Ry7RwtjH~N1<$F zYNm6_1-J^M?{CiXuQD`pl* zE9d5S3N!2KAy)Y+M9fUFgcpv4Os_P(7%T#~qt@^>KHv4a=pibHjKcOkzQ)&%=ZLay z!11~JlE~?8kl8{9ZZAu=o>%Ng$>(dBeDU2dO(1(R=9wwd$r5NeO@;qsUrt=J;YrM7 zy?)3{srq()XYNEdcIQ{Pjfh$yFFuc9yY46IR!Lt>~8m!2H z&xJph5;+j){b;Oah+M?0R(YlX+nZ6}q>H@ktrn0{{xzwXkPU^_t-u+NB4vu^^;OPm z7#h8L1;!>KqUqc`hRXS3;-jQI23>MJUlTcVaBx)IhD@I&0GahXje@VFtYt9RNknrsSP;q>)`c%oh1y4@EardbNylS<$d44IXg?=!s ztg6cX%3Om8X+rAhT+&N3CTp|v;=V%vkV5D3g@E1~@{XvVz`@3$1-sbMLm;kn>**l^ zlB=0zS2+y%H*^%yPU}os=>lO3>r~d?nBlXLDLMe=rKK`Vc>AFq*K6aA^dO$y%~yxl)rML^jpA}wLEg-ymkwh# zWIZ8Pz}-u*{B5|P8ic9-^hyU3-paX|B<~F~$&xxO9CcmNnCdG@vo$K=JQXTYcTM|Q zJ5alLsZG|xn18ei0nu2HS8L`u-I)OWkKT!q&7m z7FK}zlXu61?pF!Xd`rhVB9=Yt@8oD!*zFN!^(Z?gq!Pgrwpi>jrs&eedWV zpU}k{5m#%l;yU8+Aq_16yGZX+^Hm8J_R~sL27JTSL9=Pys~jJWE;hf9h?z_&G}L2{ zg~8=X&S+4*5*|OB+ZTRII4ik&oDUJCHgvS)HH|7k%{IuL~5$ z2;_z(5>cLyJ1140rNR8+5_vE!AgX^3EBRrgctIm1b##wP(;DkB-D0}NI7GRSBFGvk 
zzE-xcTM4ehY8c!^GMW*Wi#tvOu(8@*i-=C=cmm1_)FgCv?ES{ukXHrW(N3R&A}abT zJE09HlZeLLpr?E4rNOh}5u+T|iN3IP;`&My%IDZTnW>GahCykV?$JI2|B+?n|_u*AMuPZ5B9w zOXLzC+A`k=c4|x*CMa&a5(kP-F$E}ynkfsfTLmH$bpM?bdMGrYjl!}AKBA{%*aYg5 zfpa$9-V^xNh)y!%Mo3RCnDYwua6U}*AkrS0&9LQdna1w(y%O4bK}WiLsD&t5miDvK zxneHp!kl-;+On!KqT3!?_a#UxI0)Z7S6ZcZ7L;)^+^~WT-p^`5FkvRzcwwn9q?QH76k6a}YXYj&7`mhh z$eoA)0aXhs`FGt=Rr1$<%FT&iqoILquMivT58ZX}wO^}bxq3Zif5;a9K(m5xpVF6| zC;be(KdE~Q%N2j?YesFF98Ik^l$6Co*+r72B46$_QphU^tX-5z?BUdjak^}l<+hk) zwVa+3ucnI4N9};5k~onA_+{QzgBWbRn<0p60p|fTTA>%R6_atOPLMcab`DR43BPNN zv}Oz?)oaIrW1VP*wMpmm}&+$&uXCg_MuGXcJGNh=PEltL*LDKHXF8+UcZtP|WzC z7Em$Tg^ccXq!oG{m)t&ChsSn=fs$rCj_YU+_!lO;I&ffFVoLN@Hc~m49(N9*Bp6XR ze0ebU50|+P?w`@!&V?uL(N-v@I-{0xY=|GcDs1Wm60S5ebl+gNB+`5=+B zV-Q*O>HV$F7_mW`5`wBH$9X`!pE&n-E-tJL&?v*Ki=2Fzj#G8OXlyriOl}6)bIXa} z*`yJfFl5-T=V{d>+KTDdWfY`-Md0F(7nM1W$c&uLV)n1Pv?54LR^7E;Gt?OVgWXP$ zBCQ${nIUg@*QL4jD2R$G5&|j=K@BMzugShC>GezuJF@m%%S5*kS z6#_d7a!hRL-RK|hMZg}@0oAX`kCkU;Be^;epmqVJhL4k-{Ywp3n=UdMFj52ywHU=H z(hv*vR5Zc@DEo8Zo*!REF`y)g%Bi`{$`c9MO1%hTyl%pmp2Z$43ka$h2a3 z+qE3}UcNQ$>LeHClQY9dbEAteK(#IQC3NQQH_JI&}M|TO#!=55ESd`U=I3XnLO6G|GQ7GzJo1yC{2N|Ih)FqgjaqPj{lg zx!m;ukNW8bGc4+id_guC-zl|J_Zd%83fjrdG%SQ58`P&4*W!lU!5oFIP(?fajz}O= zaB3=8-Xknaf$b?WbnGIko>ZVh>6jrj6Z9&D8&qwJcD;Z#=*FLYcAO}OVBktPhDgarj%jD6dAW&l6uTt=#J+( z?Pf=k!t&KB5hohhSCwtlXFJM$0c15g#_=cOHKYPZ6&nEWyURVNnNr09eBVCI{g@f; zqFcM-ChSPHV`V6G_6lS8zgIV!2t%|s^<#;9*c2fa>FZ7_WYGlc3GMj$l3f&YbvpR{ zc46VplaWt+zsq-<{|$nH6uvPivD69MSx7w}DMfDs8I=-um^zAnlNcg^iskpe#0mP5 zhh9D~1nP1Ay6T6f%w2dkjn<97pf~lOz}VQ?GV%_B0t#1wZYO`{CAEIgfSj)Az&g+w zxC5ox#Sw8w?o37~LC&S!8oZtS-~RzUj;bHjhFC}XMMk^P^v)ZFcB~(`1}7Ml zOXvv?OV+O$wj1uX)K)c~HB#VBfPOvH%|CJDz-JZnggdzF)3J%|jfi|0J-3_vN@P8u zL9Owwa4)4qRP5Gsnm0{wy4fc~VWKhhi>+Dbz>echAibC9QC)HzVe1_c{WI^nz0vDoa`i%!KE3sUXG@ zCFzcbwwVxdg(@Gm{7VDEj(p0%BD`7XHZkTuh8MrCd!5(+eU)&EHEfjAvIYhVI^cK|?px)2f*7C!r(vcBep`GiY+)ck7wS&*KOi{%j#eg~wUCi(K1@zg|fN%Z6y>rP6h#_~yOIzvq_H(2b1vz1$2V2~T=Q(bOqRSH?>yAF!!A9Ozz3rc6N-!Qd 
z&;afAIgR)E!?AR;Ue}QsAL-a$Z%bp5W7($n3kzhV5uN(HINbnJ)PmK+NDgX zw+^cFwCPkozj1Ub=JCx{^%d^ipP?*x)rz$Mvp*tenXAGBMxdPaJP_nvD-14!seoz= z1%DP+Y|oJeE>Bw^AC2Q6&$=3M!a~JKYL08q@pqcO^W!S9BQD3$>+|3ib#uDEIT@&Z zW4IW{qgWt532)3eM>Oeu3>A3u3s+u^L58t_NujeRW=s%9C2cbcKLH(8&gS6m=8Tj+ z-IS*E^iZgbFBmC0JYUZBwc+C#hBa~D{3DH90#9?(Ag8RAeoSbG_Ivi40u--F3j*=c z=Uoz^wj0J6Q{79e*0zl|Px5bOM40_99~`M64lV*=qZ4ebodHuw8FD_TvoSl49`-H8 z8e?YnUU4C|&IwzSxJVNfLsDZn+R3j9S1mz(rd{MQ^m&qf1QU_}1{D&N&g_O^6yUX; z7%}b1lv6o3cHk;YmKFYb&q5KLYoem;q)~z;)v)E zt59Db^9A3+4xG{6jZM|FnL!U#a^`M$E7A2MP1g9_Fo=Hu+gEC`a|}rd%j{(6bpE$F zXCLu)IV4KY$3-*pOfj!9I1>CLk^!m@nC->>vSp4@e4u|!!KTlY21!<}5!JwxkX)hm z^K|{D`xv&(w~?(mG?-8phUQQpC9@r^0Kvan)Os`u6zQeLU_rO3qPZNp|l&ZU)) zy@ey|Z>Oh5&s%c)jBH-8`4XRx%<=gjenb5&0_8s@>OcfMed*B1yMw33*q7jP^Ou7& z3>$*cOUWwW&dc@z{2Ab(Ko?DvGXhskQ6l=>HYE&TJ@BFwat7v6uGuv-uuSIb^qsfeLi5RA+J5$auDfWmFI&b?gw}pi+my|gG=N})@r6*-_;j zGCFvlqRJs406)525w|~9D=P1X zJ6S?ejzZv_${R=OEB7Lw4rWbWEY?J;^ymFo(eh-$r7w~U1>9wvZ~!!!_z`}Nq}5=( z&MYM?)k)|T&X|z>$76dYv`D zWYNbXlhBt^=Lf+}mkh1f*KlWyB6%hLCY_yz3|Z&)ACzu~eIMSLYkgo! 
zoK1g7aXAxU-{m!Dr&>QStFL1;!v2n_!Qk#ANSLz4V2iS1uSE&v#Zj#E81HQ086WpI)+-`0-O>$wo?Fb`xe5g^#*KureAEZ>xxeX9%$;%8rx{nai@pQ}*72 z7o>hSqBNIfoS6+7FOcmFQi+2`739ezg&D&|oeP$yL9>40CO4f=4uA|l0BrQoB)lIt zWqWPRYIB;#3Wj?2lBZ=_!2t&8W*+IQByfzhHFQ?L1I@=bJ8yOczTR0fD_e7laF{Vo z$HBv%bgMX%r#qrwoVb0O8C_B&%2L02Ta(X`hg|qO3TLVfx1DmFau&^#B}O-#D||FB zMHLOT0W{pcNol4$vV`>z;+<43$Z9^-)Xumch^KFj^5{X?sJjRCZj^D@kii@#NL+Ea z>PQc^XSA<7G&9DA-=3rc>N_hxI5&h)wUo$BUgi>cStK8~8tov1*s_KrFY%i^3)BYF zR+zAk2pj z+{J>d)i*g33Gr!2WIv`aj4K(Zgncl7^gl#P^*84nI~|nGRILFh47_Qu-rop;AIb#u z6fJ3<2CV$SL%Si5>qyDq=@Q5My`ArFsFA5CQTIjUnwja7ZjoaShBX7c_>TUcVwbDr zCC6iTAPpunqB{wWaDE8ChrT1rl`VJT`qEIxIaQ)1u-{05TK%$_><-0quQ(CQcsy%% zLSVV;cRi%p3fCO)D1-6I^hA;Em9ou)J8p9}(L1ge685k5Pd|t|2l0P!w#n(L^k~`|58TuaFbpx6)VXW7-6X#;T zG0L+SQ$y{MZ}7GZAs4GH{a#$gD{J_{W9#XpyV9Bb1c&9O$<2+_OhXAqoLN=^HRJOO zrJAQ^x_HpFg#pJ$!UPLH!*FI%!}Lu-$}abr{`?`?DI998jZn#c1M%<|YN{M-ChX)( zVJU;%uhK@5F6NGA>z|A|tMjjU7 zNkor|^c!k$r=5Bs&;DkBV+9Z9b?Us+{~_c|nZOtX%L|)1i1+k^wl(M4Rw7h?h)i)5 z@}YWsrhfR)8vA|L9XJ_#`^Nx5aSmZfOU@g$Yxm`vwiLd5n3y1;6m!jJ>tV8L!PToHH z1TWFyA1~9zl}XxFD*t}R^lA^0)6mM>yALVs8_R2f4T|BbE7#5 zj|T5zkAOC5D*Cv+7KQFqi`|;`y2tCh>{@*dfdJ~edUAJ{@{~eZZXaMsC)$pNs{x611D+s~^9q=gx$0n^w7u3WUiai*P?s zV!Mo-Y$VN7`xwnlC2rs7k6WLS(VE)K$8lhac1D~b8+9&1s8^L$aoE6IR9%wQU~us; zWmJJ&@jiGb^084~4|q=if^RtD)j{JzhV-8`Qekf9<6BmiG_v%U zpcCN|C-TX{6oWeM z!!xvs!bH8o;$l2(gl-8$Jnj(RA1?y71R&UhExngVBnc?xwf6!3 zEr&ern_Hn5tBgGA#XJ6-+(K@h_D{X(F*rs|zMJRW2C=KpP-;kFIUKHWo8gOI67zwbAue~`I!N*d({5Y*M;Xyy_ zwE9K}NlNhkiwC)%maNg$EY7>&^GYD<3vaC{ZQA`;{gZ{iQuEv+t;U4UW# zY^=l&%dC~ez-)~mSLZsC3fSIV{G4xjjs61AuXED=jf)4$52SCNlu48JI@ZDkIe6RA zXs?}`0@~^I?vM)l?kGw<+F1JKg;ow7Yr-PqT&du4U2Z0wN2}B!=RWVtCn(a&b1mnG zK!YiiPym9{td;=qf&Hy(V5Bt=J%wP5N0c05GftcG6%^_F|FCsWO@e4m17^#%ZQHhO z+qP}nwr$(CyKJ+|o_=HEiAdO*SxZb+$HOt1{e_MRqeo%ph*qCDd>KA zNf`CCnh133HOh;UdJM06QMM8%xNIc-r}VgeRaVMcfsPGE&X& z#?-9xrX>(Gb^=fgJ1>jG3XABMOX}{z_hcioWC2er43C{WJC=&@O-ouC08p-e%n9_q zzBf2Zb)KH8)}NkWd+W;6T8fkfx|XRaijZE|{&CpAl}k4A$h?XXpoq4tJWD{D90ilL 
z0BLFBM79*YKcv!Y!-pZJ1|Ene4>M)2oYI%?FC5h(FQ~E$7fL?Q9i*h^QvbH+Lc|>V z@tj^3gU<`#6G$*U(AbnzgY8c+kjRq(c#^XdChYA;+Fl8SxPUzSQ{xL8CG$!EZ!s@s;blbsIB8?C@@w`twuB9B z8ABz(Z5$T-q$rUn=;P;VfI57)`J*<$GZDY~h(77no;6)syjSKh68eCD6kn0__%r<^x8b zzv4u4^ZDQGeFX#)Oh7X1QQ`Av3fAVYWuI@x>cHbyok7W%55zBi4;c<~gH=SLdDR$P zN{_6E0Z_I1sUt77V}JuW+XG}Yb}=&iwCG$h(hx=qtfw^&2aT=wbZJ?8HoI(VK7SU@ zKq9QjQbl&*c1i&w+0*AJGj(;W?A}wi$vujoGnV%eYo~+ELT^S;$&#!2MU`lXw|%{{ zzW93T=Y5T3WmKc$K5{<_)wpZ?E@is)GF z15mIlg)56pB7;Fn5=G$`MXA?LRXDIC0=(6;+X^OLa{gc*A-2_C0Jk`tgqWk$tS)tD zG$5KnQ|U%W;996`vU5Xkx-sRek)1Nd_3^g4L&@+Fvfwbt^K7JUVxY$OM?>E~Hqn;( z`Hec(jt?P*OQ1ec!Vj5i2Vcy+T&8}TX=~yk9dC`ImFoZgbti^pVYg@H#ZiKL(-yTo zsiAl6;;RV7fcP+7!jYI}`+foIz+$VVS&Ga_UyAcj;>9S4WPt}+`TSV`IlESfIP2@2-LXJckg#r`p9Rn>OPKXX$gT;0&+jcHXx^7W zW#X*b?7Ut>v$x?e6bCg{^!Z#Yb1|IhZ@xF8Ai|?&Zk$c$1uqJk#j@d*2tN}o1}Vpd4-m0FnJClnN92l*NadCgP&J z&@au6HX;p2je6|69wl6*`*LaF0a9z4MY;RESTws6{C3+bmc z{=sNK!64J`42bD1`*kNcda2i)C3+0!-s43$N`++~Pe%Y3A7|yKl^ZabKX;lJ5R>;H zF4>89X+rwnD2Y68FgV5>ICY4Q`fJ8@Y006rQOzwe^548d0c`s&=q0t$;hX^MW}le6 z>z%&$?}dnj?;PB)oA{^#-iKlbUJh&AVB_iL6TyOP4E}-4x+>Xs!!Ks$p-q7U2I1oj z0WQvZ$3W>_0^ij6hjWrLjx>s@HWq&f96^LvjNsF&sJZn@Kp4xj8?};uA&xxg3m%bc zGUtZU+whaa?XQGw{xAG1X1bs0_c%M4vgazeEDIq4kA+icPe&gSXCB+tzAX2iwI!1n}UQu&l)vaOKC*N1Qi8!-p1k&`{S+)iu-!r6`-8 zf{mogZx5<1p#yx3TD|wHM70=uHGvCF5T~)7536pQo4*)V7h3cApnj0 zPw}*8*=H>tI@Zy~3i-c^&;75bPcWH%>J>>nZT#I({`tOJ!pi|n!rxBiw01z^0se?f zBv)UhLHNgLk%BaY`t9Kxq6s(m;*cdy)V60i?v$G($P%nH5ntJ)PO(LJF8Mhx;kZP~ zr!5Ne$}f9v+p^g{1nO=CFTQ5%EW2d_po|F51VSc_u~+o{jjau7bfPBb566jmX-nLeZQFAKz_52Pu69-O+o_{SSsC}hYWHZq+U-M ztmvqqAJCBM#sF4l06Rd$zu(?UdV50?7^gX>lSfFwyciBTjriZYrr2I^2?bu{uq5j7@>ZqC6^{h->I1mp1z0v+T_`nFfzn z`GYA}gaX?D-}?6Gq;+U3((&gqd`Pr%J->4{0macon?^kOpF8-8w$pu|QxXjc4Fu#! 
z0o9$jDl>s;0Zeq&P6wn=Zm{i&Tnc2)nOteHjT741S;7Z{;bI7d*Sa46%ErhK&1Fxo zyB?YC4-ny7*@^VmLtrDwJJ9K*I2uEHT*n7>f-kbzih##wt-)ZcAybN0WLk+#meLPp zleY6$!K6#5V&ive_dUYkVJi(sf{0IoP8^gTIpt|(}eS8DX8!&_cbZMkHv{jmwHF2|sw#o&laK};@X zsxCem7?Wp2?NFncBUbvVICY$UMYX;_7_N>mrm}hIPX0udE-7}w&n}M+p75{8s}IzG zg16cToGb+vdah=1%NI!rYTHM!Lc;m$I=H~7c#sTsHFDQm*ij!mhxF0l6(jLGvdGMl z0YHOTi8T%3D)cF?u%k>&r&KdMW>5$>zs66ae8T!+n1^ZykC?bqzPpPuAVr8k=Z@jf za~G@*cloIwY*#|xrhJCVzm&EA!T3!@R0K;SRE~a605*!$2X)U3pK=3v$*@R^Yj?U_ z!`@(Dr1?b6_<;zH&B|ZK;a*>%@0gk9ufItQmG5uMTDEzz5(wv0rrWsbs2TDv4SCtW zF2kJ>sB6p5c2kJyfYDG13u?!tMzff*o2DqN38pqWl;<2njqqtR&>zB>M(?X&n|Nu9 z?mw`{dU;?-D(0$J#CW#qn#fdBs>;(bep?9tdVvwD;C68TF~Ae3Pgpg-Wzwj^p1Km5Y+N5RvfUsgx7qFFL82O#p%b`_5o+3Ip&ef7};~Oe;B4fu`9%$N5 ze})5=tJBuN?xaFii*OC;lm^)Bxeh6fh^_>s*w_9FS5aB0DboWW^?|-DyyNdMdar_H zra1KXziv_-@Pg?#CP7&LBO&Df;+2j6^o2|VeH+f*h{wYb%OQY4&wAa0sfKf{LQ%6( zTiy($1YFgcd@5Peb5l%%I(1lvg~#i{VvBpAGFf)bPDB7FM5*%94p!qD{1?CNcw#AC z_gOb*pn6mfq95%>|IMM$(OPzR!=m?u6Fk-JWcIg)9X>`x_fpPv4S@{^>ZaXQi0p~z zLKUZX$lE6|AgERp5$^)Ku5SnR(==5-@ftQX*N2y&Pr=_$4T1^n}WdT6~`*?czPe1Cxtd&W(&5+m;UyjT1 z#c&}9Pg`TI( zeD;7H(_644(3aM~llNUb?&vhl^?*EEyetUPoQY7w@_BbpYTSxNZ4|K0atFyIav3Ei9G3c>3l100|i#~~52`K4$Jdpt;^c@YI{911tuL}^^pXej; zdQU+pA?ggqC*hIUg_DM+5$ZlIa3%fSyPHq(CANn>iMUDxSjnvLfm#&dfR*=NCl3o9 znNj(J!4YP9(C2!TGg!7G-`)fWtt#fz?Bl24Nc`tf(wegw%AmY;YnQZ!Khn^;KOwB7 znSGR_G9rNZHv`CmW7E8JG%LRu+y0)Dkuw8fQVt99f5) zcw)GV)lLAem#da%eQ(%7vZZB9O*lR+>G62X>k}csio?BzZ1-&VWW9`{<2u2fyD(8s zoy=jXN^PC*8NZ>FG2JyD-35hSUqK@GZHcU~1INvA(%Kpp#d0Qm2JU5`_=WE+)=5-S zUIdg*1)WM-;|zt&`4 zvvK$N7tjq&ifA?ZH zlTYRer3*~tQUhZl5bQNU|E+EAPWR%x;dIuU0w*BW%IFhi*cE(X#23RfL_ltk5?e0S zC#yZ?4JY7>scfe3tG+5T40ipfgHmBKVf(!6Js1;zlv)Ja&977-FHg0893|2_z=^1P zh2c0Kh9oAJHRf&0O!|Wi`Ti{V@z%~21}%;{4@Ypl`?C3Lf}2$i_@gi&W=)XZuY%-$ zxD@7dbjP)rGJ}(6Kl_TYyxUO>zRM5Lcs5^L*3b$i`h#KT7s33!YbFA4;Cqe{($4ZF zpTuQi{4R*40!VHg#_21}s+0c;u4LXBisxgnq?`RoOv*IV0tCR6h%Y_wEo;bOMpFc= z{oYhZoR{5kpH#zV3#!1GPt6W6X93uH8SS)gTeF8AnQX0o|JCpK{5AgD2%&OdeTo3;)>bg)`+3g!0oixnmr|0G$r 
z#jRz+p@NxiC~iSm?6Zj{@XcKxs*zYs279%$E2NYQaoD#YP*P%r)hW zb(|6;1ANp5FW#WIP6T)Q=Hd9YnIz(`HV^jy?@yC_6oEhs{NcTaU3yCTJQcHHvnK;V z=G}7CH4MPQ0X9aS-WPy3JWc)Gb}zu=7ugdxH!g^qdY)R*g>e;}*nMJ)@8H@zK(iilNo~gfz&6ABe4xE{*wGEEK4G6u*QhNzQTZ(3w#4-~QEXHfk;8_0| zcc7=#%1w{%JmY==f{LV<1X|3-Vq zen;0=q>a(jH6Q;T!-`$T1|#=9EH)3?PsLaU2x1u*J;c4T^ZY<@0BO2+qrh87=V$+m zv0o*V2v_@rYP*_K^8iqN<@2z4%Wk+ix#@2WknM|9B>~9AYU31iQMPL1JT*zsifL=G zvLGuQqBgAR?tYbYk6LYG1sLX|>X>aCdy^2`*6-J+N24H;>5%vTwdM1-!QK#^Qs?So_MfQycRv~hka>i#ky)IHe6C0}n0?AEcudZcA_AX&$C>t%Da&)uqX(upp!t@B zmAjLsSJM*9!ibx;-R%BSs|WpR&>zMw0ty^e9Nli#t|*DW!xPEn>sMTFovPn_y$CRG zVY%ZorSs8nQ^Uo!Ay~T$EOGZ0!VC90N6(%pv4e;K=)@&~M_57$M8X}sl`sv@z#6%- z0fxoH4?B&o%`6vim}$pD9b6j5oH&W)BRi26FnNq%V?q0Wio5GKHL2{`3}GHd0YDFq zl|hC}bD@u`%m!uWmdAv6bRLx?3aVe;s_cuRsP#KdxtB92{DiaMf`;@K`Eb+K8X>Y> zsgRYTO~_`PzyMv_edlarHKEao-t}!R#f1I}IP8fu}%;&siboGZfeS#wRyu*DK-v5WmD{}MUOiGby~`P#q6Q}F~ailiZzKzA1kJ4&rR0X$jB#Fb1;b@+R7 zZ|j5t49ZWSaUP*@Cy)7P*u6l80j=o49%1t_;C|;JP5;yFu{s{Lq&Yi#!KTt3UMWGP z0g~^Sl7;oti+rm7k$2rM@EV_=&=;m3P#>2<0~JOH*iE#9ZBbCD#2|o8|u@h{2 zqBHo+FEgBjjmT&>D;`c4P!l)K}1hOC6NyC`uQh$W~Hl>1X z{6`BbN)Suanf0uvjUu8Zl(QMG|IVhkJf4g1nJLl&%If$nr45VVq+-%$DeRE+^VHt@ z#iW|3n8J_!x&EcAB{nn9cB`5Ibzbp7ki%BP|JBLBh5fL=Wg;g*S}%(xP{m<8QM)Dd zy>*R{d+Hp+3}8AbR%AK^OTuF$Mw4Ht8VHRP-eHo=v&_LGPV}B>W+=m)TE@NZb_?to zK_)jZJ>v?C&sy*%V98L z)U+%72{39}iV!)0^AmC6Ya#YlZ*NfWMS7PNOYOZ@wSBF$MaCTVkV~3fW3HEMnNC1e zJLO*E#OC4Q2^cXKJv+yb&=T^R%SgCW%@Aza{*N~xvX>4*cKN+oLE z&Vba&plrQ4IMW5|iKQ&I*V1i#KQ&0K$jn*muZ{WV)_7V&dxgAQ@T7=hA^*iwi!m8C zQApIONbZY7U8sfkVttka53)Mtp^`|FsHJp>HTlg=FOU$)rEjWXC;NSw?XRACW;Y8C zp!)-CIvd?Eu_90dzJBMFh54z{nBUw72rgn&F#f@an3DdL^uA~Ri%4BtM+@PBd^)#q zW4nfj((`Mh-A}kH0@1uJ=_wzol%`6z`RBMPMmw}u*l&5sel@=#$i%%C6(Ze_YL@A1 zKzW?nRBpH+Sd#QT0OhmP7>z~;(QhP+)-8V`GC?(qPYq`T=#N`zB2eq}IDT$ft3!(@ zvTNt%%_Smgh#l-fjHcwlgw*K~wMMmdNf&*$PTA5fHL>hoeQNK_EywdUIxP36CoS!! 
zpL#ES%QmE7yi|$6Cub3%L?-O%r&lb5kHbB z{olg87Z0%n5`J-0`PwY#cnKA}pY>swh0s9Fork>9KfM%&&UYRP>?LiO(gA>?7nCJ} ztgC89+DfjlnLw4F?mNPBtLTHuJPOI7HDLa?|6j}vQQ13Ss-CRjB_u6kq^PPj2ZKY&qtdaz}{*{|D(-?Mq#c<$A9Az#arlJ8CI%J4kHvl(a3I zoLj!SyMBgzyRYjlPnYU!v5k*xCTwZ52P0Mq&yzG3_DHmv&5tm-K2?s9=m0%Le1fPb zY97r8n`A!USJ`_kCIqL(jZjnwgoh%@`&_u@jr_Fq$42d6`3+JhnrjD{IIn3F_?RB7+7qI51x>ccxOPM#pAGU<;uw0ltZKqSfx;qd*dBmHotx!nU(GMblaNCsHVbpOPW z8Q3%ySB!dv(9<7-$_mU7KKeh#h$yKa;-J~_ZEd6)PL|$4Y0=;qaoL)o{*}L>jIw}q zHL2o;GZXifI9Y6*N+l6>u-s6w$l5kN-YoRft$B&WuEvs;8-hi-D!3X=y}JHz zR?7cT7287mU}DYgE`I~DR+^SR952{8cK>PnoIihF@K(ZWxDRuXrFTgVxUt`|Tn;{M zjh^t_c9y+ymA}Zn5gxshqH`J4^r=RX-5zaa^Sy8mUBz)YD+H$Ei3DQ z${DWvRn=7Jr{uFn7R#Gvs@V&!@_MMeqyZe*w|<`_CJVysXvz^`|I*ZTw#1Q)(tk~f z&I(kb-`9&Q-M zPb-4(fQti1Ec%j&r|WDoH{eh<_Gxf=WL9NoN_|RNnQ}L((w|t4l;mD%W%Ty<7z1Li zThr&i=&_@cs_3+#Y1!JL#R|nzAxnQEEq?TDmC>6EA+yA4zw#Gym>=OKbDpv|D%#m3+EG%mqYAy`NYU#rZ?|kBUmTeTLDugk}OiGrS!+ z879@on7-mQ(Ej3ElC_P=!a@&XXgQ9m0@5yECAJfNl!LwgMUdg{I$D#yG5OsY3vPu+ zE3TSb0-^f!>4?6u4n!;U@IORk&+g%4-jgg~q@e!T52KoZtTPZgiLFn&-U@KFA7>&c zwy*e_)jo^%aj-(YOn(0G06@oe12aEhw%vHV1LhB(E3eOprX^3{i~ z#At;MpnV1L)b9#Qp=eHwoi8FQ^x)GNbgLnV{uLBk|70K*YHsuUB9l+XFx5{VyvV*p z)(3YTExOXM_1N+^ACyM~ln>J#kw1xl|M#})vm`TLT=C8D z1{5?|b#_NT&MsaI)MV$p=ijr>6cINKh97BXsmJaogH96jo<(Rh}^x;gukr8eM1>l!9vi}GEgWd`6g}1R_ zwId$)-}18b;fHtux_%qA>5(RJlLdu=12dAgzUf&(Dqk!3HBq2+m#F;cs*1JosPM3fA!xvkQcxTgm(fl$&H(j@o4@(S;pyL+UsoWYg zj(hM8?d{>d6;epE_L7S8tCTULezrhNKu&NCDQ0oh3nhtIuOFqq zo^XN2FZpdkcW|bSen#t@MAR>gV!*v%oG{vTqQ49psm?9S_11pSEjrHo7@wJ!5jnhf z=)BI;EzHH$8T+SSJ(HXKiAOpIAk-yrstb-)qve?U|Iy#FL2}1a%E4n-@ecwiNreMu zaduMx`jsxGqnVZUZa>(LsmE^1nWJL0wCFU9Vc7>Dh6OI(Sz)ptz}h9D4fNht&h2aF4d7 z5PZG4P`s700mxf6M|&zA{L`DBdj+8rqA7rNj3_g~jZN}rGOT#jJ#2Am$K1$4a(JsF zusi}uyoCxQ$(H$W7G&IL@n#1bssnT72?5;z2Qv=N)*^A#>BcB1U8VEw{dLEN7(1Gr zX9rZWTjjh`o2<~LB8k(_a9Gu{jp4cozJURp)@2|z6qKbU5Kzw4|3c`yYd|(17~l=z zm(4u+13!qYbkr4Irf(_P(4H8VPQJ$lvHKp2mEx+%++3~nbfS`pW%La!&XsXslYuu< zOe9eqf4i89P`1^gtCW5;DhK=LkAmC)`|=2;(V(p-@K!8s&Cs&9O`@tUY<8J2@Q4#k 
zkEqe>n#ojvz>iC*U9~-Z&Q^w!y-?*Bu);8r@ok)>gtapb9jIs;>egctNPySXXhCXi z%mnRxWe{}RHAoGGM1Y+`nvI<@V2>XHbB9{qNY|gwCI-Ttp>lpZoOvcs7RzlDo(X)ig%X4o;-BV;KOY^R26boT_W!tA^xOBcwOcaG>wS^E2k^ zye9A1u>aN|>+nSLz?=3aj3_#O>I@7PE@~^W~^e7|?f zL!QaE|JU89r$w2fir-HTV%KGdp9gsd6OR9qvPj8U#oc<%x(~yrQYmp6pHfQ4<-eS~ zqPFpH1bUI|i!7>n`LX(R4tp>GM-^6Damj4b6m-h(qq*TA4WQJ~hTrjRlORdICL|2g zcyQ?56lGO?7Lf``MdWv8n%%s4hPCHrPa#-$OOX#D2H}(}CtCbQC0y+0K>oxyna*VPGXPbs7=;P$#!>Gu*Ix zv%-p5%M#oH(+^toDX#pxBGZ7VxP1$dAj;O@pZ;2KOPzSulSl*cn>{n@ll%iVTQ7nK zpPisLJUo&_r)(Bd3sDajr=k0xX_=3@_TEKvbv}JU+C;)P2t7~bONKQ!A zMEVr!E82=~D!biE?Fpj(+nxQyXM9)l?9C1 zs8T<*%?;HQh?#F*`fx2t-xP^l=-21Q-vbH*a!!FHWTr%hOo&0)rX32FCL(c$yeHTn zuQS$SE{Cdj;cF-x5)GoJRy5%T%}z$gmb}?>}en%mGZx&VA`Mi>}ao$Nc^;yUf>mfkhd5xyp64U~#e{MhmJ$PX03Kc z9CCt`RmV(36h+p+8Vum<+Yy=|RnG22(Y6ZvE<~qi=|z%V%hHbYQyp7(J~P@p6Jsmf z!jT^oTcjD2qO6ucy#zPpSgcsJ!FaKD?R&t1j#$s9oez|Mh@~RS8m3}nvh=(`R6vv~ zz56r*5K2nyfAUNw=Ag@9It0-1*p9&fYOMzaWgN;cUCEI?*-rU=xDhSYY{c3k!0al4h?}+!?c+cTL=YWMU#EE7M zFUc}h&13FLFq#v5T91!b5TC||^w9J)SMvmxl{cSE(A-n*@KaU!F*_HOCJJ`G!+m%E z+~U@cTXl0pP@pDF^m+9)a`d=?NaYv**MS+AzQt7%OR?a`DLB$&3u5R#YP53~U#v0;K5=e~G5K_*PI4)C~hm3)& zL{VcUa5zeI{J|2pxEgMt(+gH>Q=EYPA1Fgof<;oSs;_~Mq1O{lH+kxx6cRJOKsfnJ z+?w;mU)cv0xnA=qK$3K}nk%G#dPs_|xL3KM-`*?g9TY^M-rM4jS-gz5|C*I|)zZr>03Gp?`pOS!E^Z*qLa`xXEU zJ1bR7gY)HRyd__fJeO6Mui;{5``tY1CvrP5q6KiIa)pDndy6-7@RC(p z0GXIilQ6$mo8eW}%jFHp>PRi}obF>PI)~-*RsXd;sO3fJ z>({B;rwdAvR;U?jsOayK4)*khtwi`sr>=j(lE;zIz_Q&sqD74!YGM9^C+(e1yi?tz z$P?)r?iHw3JJ;COP>ZcP%U&&Kwm*viw^knphjoe6zXn_T$4KEB%=jz|k58!=?7onr zz%CF;$4!B@7>h9*u*~+U085>2&jy%(*Ba1rG^l#mNo z8GdSv zyeC`zEdQz>=-)IYC!8|DJ4iEV{=p}2oR%L9U8v5mf6yu(OT)yZAHD`X9u&ZS0nqj% z%2vV=NRU4CX9#<9or|DJb5vva^5kWZO}ex!0KSYWjyHQZh~VPL3tP7eZk|o%#Xa{` zD#SOv=y~L?pv&c9%g!5%nFMr?LXm$Giji7YMB}fvlkTs|$>QV(AzKEVlra+;!WUM+ zDh}8c5cf0&aRA5pAWDcL{W(q1)+gas%b zkVKa>w1iYaHohi>*`zkjkjDf;rUU{fLO;Ckpxmd~w;ae!#}Dam-blCO_y5G{Bnt8| zk3)w24mye#mA)QDDepPMRFv{I!tcb2S`vH+lMtM( zThVOiN<7&U#@Mx@ecSyRFPEbt*nXAUWH>0G^!6nCcs{jo)TGepw8j2fRKLf&dv3r! 
z@XMFm4DvxzhpWRyPalpwhIB8#a_~KVjK6qBadh|FHC84 znWKN|-($Z_>Fa{RQvtK5iA#ekF%VuhF|nl?rJ%cJY}4n4{3qj!@z4w;zgfnIFyyEd zhx7Mm`TEm}y*qO%y7uYvu8zkN331utYf$(d3v~hEB1R6mO(w>|F@Wv|41J|(^!0WL zNE{E}ieFBZHrHfx*D7>=2Ted!7y&;gQ?mfjzhJJ@q`%u=cRzD@lx$qYShKA%3@a7^ z2Nmk%%#J!+2ZJu*bao(T2eH-7;&@-X3RxYR5h9fBkR4zB8#sCyuG|D+A-0Ji8W<&} z8P?m1uT@YPc)Ufpm4pgaQn8}LZm~|*npw6mdH49Rvkbm(oRdo8XJ@~^rpB<7*dEA0h8u@RZEaeIgtl9SRyXo+owXK~jIW6D2caGv2Oom~vLc*i*T@nBZ-~)P_#P`| zW0Fh8R!FY~@tm(Ug2*Tz^n#n&M`iEf)4VO$Hiu>YL2{#xf)%Zr$O%?pZ96dnX4`Lc z--!XJD-~?ML3e@D3%qcU&>cBoPV7OtjWMo0!U!Lx40ee;W4h^KBon$aue`cd<5o6a zj-FqWlA=stFq6nA!|}pd*)~tp%hODF2`R25&IAEMHbf z6MPPCYP~lZN6Ko8veHm-SCZ)4~FHf-pK ziwBI!9MQ7>g(8rt8%F?nRUE_n%83>y~uOyP!=o7@S57 zrij^#>B;N@9-5RpDnN@GQb|ABm(Yi9o{D~8UmDJTxBw0)ZWEiS$*Nm1w84zLN#@Ef z1UKT?BRTU8hJW;ET_Tyhvt7L&$)g=5>Zvg<- zLITkYI?LycrU^o*_S z`+;f<>w`JTh94$OG7y!jV8+wq)n>E;CGQMDtBWkFG*^#Z&{3Ilh~aKqE7@fEr+^&+}1?AOakR-DHJ~O2qx9d4ksmWsxYJ(s!NQ1(>KV zOE~hcVi)a}o*t^C6k1tiR)@)U`gtr>KY-%Q_P$^@UpNn8p8hHyZa`65shrQRZ6CA& zQYH-4tdH+_N}}NIpf-gL!HRE$>Ye+y#zlj{`f|ff^;X2C+ggU3GpV$3d558;(yqMz zUJi*ql{on7IzYBwRlx;OZ9(o8S3S=zr66FolgGO91lUHHtUEG-TlW=kPZgW9Wk(r^ zsXYKP;(BDu4*Yl_bs7O@(lrM(e>W;VCp!RvMgo>6A@-XMF-auN)iJaqdpyEbP)^Ff zvqpZlU`EhEhM^}rp?N-Y5WQt+DT(rZN((@(k~@T*D`1b2AgJ3z%ggcKD35G&wkta* z`^M1X&3P12BgZV3<*`7=UJk*NE^5G>{d)KzXch5ZM}i!{4PG%5i}pg#@W>W0u=*&o zd%l`M6TCls2h$U6Y<~H4z_MEOhC7F=_V3ofNxZF-DGHp%b6pyXEd3$tJL@EgqL4EN z2#X@C_dqTRFbYp@%eweS$M-ELdpy7}j*^+aRfD2p-PB2|4=6j?NvB^G_~KTJnMB@C zh&iZpE530Yy%39S>@f(}g}_i_s!-Ed6veAzw7yMW9_r`uZC zD4(pGerb?Zi946J`LJPzfNY0tiAxhthbfG+SV>Hp4Lua6h8bhJ4TIM0fVnnlWp~!M&;ld4RYmf=VV^YK2ZmzBwt;2B{JRw7yBSMJLwF>lmGn{?*|E9+0Y+XnD-AaoG(D##3R^FXWpb`6%Sl@)B5!D zH%;Os{DmyjGMisk&dYIAXj!`!^|Ukk!sn}_dHYLz>@btCnYY~C61bqFDkiS!Ot)-l zU(2FGr#;F4SVj8a>Rrl6rJA%~|05C%#BYOgyZ;WSvW^%l*S_`%;gtJ17Sek1vakTy+rVME#USu!( zoUYdK+j!2~%yZ?xzC%yHq`dEWQY0IHi;Vf1fy|fq7#&^COfR?IXZ+G+9rMYbn2Y2v z+xH7tK${Z^KACXmo*MEO<*5|g|90L&XtYEuMbLTu`cFNWI8l>Vx_VeEc+a$Xr11lT 
z-1bMT$xtz$nH@r;qV$yOV;~&;mg3R@S5L)_$^T#!|2hQbkeFtb>sIz(a7|%`yi5`Uj+Il27tN zfHhZ?jPnWRAo=eyd?cU`Eoy_{gKE280=ZQ!%jAO>)IMGM4UUT34GCFkMEP$TX7Ufl z=}eOQYTUCy&5sbbyROoMDQ*d{I57UDE*+uw`44j=EpFmybgMg-DZAn)4uicnmdql_ z7Eqz~(3WxuS6;@JGuyvISoKomPIC1zB#w4wCC`qRCq+;G#CW|fMo&CctG**l2e9`n z^r`U%_xXLGjyr8Qe)093qL%w2-3s4GhYr`fZx!VMJ<_AA+YT-6uw!z%&aD?*6&jtJ zd9*`!PdU9L+)B>Nu|v)Xv$M0JDh{JeqZtNSS7)<+>o0a$hI%HfIRL{>y&P#qdr;6Y zPkU2nXQYAg?dUQ;ks*AQ^fcZTWkA@`AW@$7ASrpBcO7vcrAN}15Zd09Hz===L@Q5{ z(+I2%2~D0~`FU7nY~l9wI`^S9p5H&Yv4UHsObE%A1qH^IUnEc5!xAZ5Q~cD3<)^r**k#cP8U_(< z9K+#XurTGX*29pKT^l)Ze%Jc%4RyUHld>HOl&WixeDO_W>3xrPw@)A(P5Z|Gm>#B> z8x9ZY8Sdv@DYEBqIgQ@lV)mK&TD?#f z@;$%o9;%xFq4qlS;4R$rc&smW4>@HuT=t|th5NQ#^ zKS98+pcm#yYcifo-rNpqXq6LbwiPwd(Q)0XD{^)z-eQya0`P8$ru|=aw z@G2u*dk=?t6k`S*g))*(5jpLDwVCKuNwXA;0-c4k5`@~@6Fqh}C)>0en>c|p`D9aM zkpm`m$7VS0?0Sst4^jnBcI^|G_S0aER<3T+*MLl#b}%rfqpK07NJPnxr)yfGk;t`za z8V0Lvo2Hhf(Q7gY*W4*jvT6)~XRP_*_rcsxp6VkI!WRiTN}TzJ32lz_jpFCim+&{% zo%;>2QT6HDJxCO6Wl;7i*NE4pZGz!S@n{BE=9_7KxU?H~&^t*>CHLvbL3z5G_;lQC zP&W$5-u$A>tB1J&fsvjETJtv6+cAdM7dVPh;eSpP<9Ramuj!rPVNcpxN@>uV4Pr*w zU`@#Q3$Z4xBc}H_IDW*En(mazL;jI0V$a<0XlcCC-vft{rN6o06U3805cBI}vr~TX ziD^A`#fz{j(RQZfH{dn*y$F$p@&9Ybrv0va7HM&3jv}BNx_sxVpTgTLY$X}Dm^A`s z=uRIyqbA^~Y4*rYrIOym8=Nur+FQ?KzNA+W1Qj*f zfF;%l)JM?0@XSxsNar;QRTqR@N6MgbANYz(S4Sp@NPh&Q1!hPC*zxl-&`?n-p&LYy zA5;yf?!}>>`wo-#wtGzt2yG?avSW)4*jr^t3>zDNe@US&v^$9-_CoBDCD*es8 zf(cn!Y3nj2m6po}dg)|jahd!4=yX94IcoPgl5@G#C~^G_KOZW%k#zf&f=^k*YRJ6` z4=|Rj310a%Yc#(ed6;th|_??qN+qeZ#~)7T5HxC&`ni zvQiHbRH^I@mA=7``(Tk!^~(+eFaCeLync0Xa(A&SoCFa-Sw`;r6cI?J7q$zx(;{Id z-ndsZW1->F81;04kHX8c1U24tD6N#1SFFI#B(w_ZfK9L=7R}>D7n$~gkc20wWFsj$ zmzY_syd^PzAh556r_i-|XBvkJZ4|F!x1!R_L3hu7h~H+RtnDACDm6I)49zx+g1KpMi8c0`{ru8~Q)6-(ilQHLM^mh-67A&IP4X8XLS9OP4{BOEBRWHmlE z3Ob{_C}tw^<{_~33@Xau?PH{O@Y>$xti8KKHEYEt9emote|~s{h+_71SQJ^xb0c#a z$o{uNbb>3UAclGM4f`0I!C9G!%sojER)4qeEaN~YFhsq+pV?5Q4yZ)1UM5e~U~n!O zM+l#9tmqZqNXBiLV7Aoyem?0r?&;c)_Zk~LpT!kC`TdpVHKz!3UV@t5uB^ 
zQuHv#f(9RQ;hI-VJt&9kmvDzNu1bgYIGg<6|bc9P4*tIX>H*?sIprnI(41# zwLC`FN=)P^uAG>~^6SXDD(2hTU$P38b5$Js@(U&3_l6q8=o$zV7!`?ltHYk-|H>#L zyt7-@uaad|+w~0QcVVA7ZIC$8f1oY-Ud~BnNinpHl`Z_~w2i-k>w}$33fqqWwKd#p zOukoGS`OSOJhJYdYrH0J}DIn5Z&kfvgwwB&beHf_`o3wCLN-nvf!syLF1%&9UGo!(ZWFr!kkDG>j*E1 z4+sI@OtjepgV!EA|2W-_`6MzE!}W3dU&nrm$#1=ovL}k#uIY&^uWY72?6DON&~uc0 zDcEH9ZRdu_0b(A~o(Yz#KyHKa-u$n<_s@BKguP>UC(qL_9NTujv2Aa-v2EM7ZQI(| zwl=n%Y;2one}A0&eqKD+nYm`B>eD^dJykXHqNlrRDKdvFIeOTkk$_h8SurcNL=!Ze z7H7oIFLf%~cy3YtsAs%7Kn}ruuVPG zv?lR`FxB!j!pHbWTzzPDOPAdw+*{zGDweio;{0Y)UQoJ0Y0#q-7QnuOTL=U6qTi5@ znT>bFToM-nb&6g}e`pRClUHlRzz^KtWj}Vj@E!Vx92;TmJVT73CsI<;Yh$25X=TfX zk+Lc34^ZCZ<924l*vGXS)(_cufia|kt)TMrsT~ZYhBd91)6u&*JgxrUCHYawWRy*x zuLsn6Hq>>5tnb^{Hjd@q5WDZn{zz&4+bV6E2)$p?F|H--ZpY|w?L~Zi-&|rUphs^VX`e9Gsx0uG|BOkwbEt39@sBM4SIXITR5?MO$3iQI2x2tj)pBQ-W$IoF+Lbv zB~3?M(vYo5ra{g~h`rA*ZQn=i;iIe(dCkmKwJn1MwDhic^e%W|T7FU8pfd#ZnV8ea zwqk{E$0Q^kR99l#zoldBR%pLT_^Rob6tBQdv_S@|lZ9&Ob7_c{vtO(;O_o7x1e>re z;XN$`K(y712$3HmHU$E!m9OzwnIKon-4?T5$AA_eCu7K6f{-}LYWkVCTOtQTVq}9j}Dq?W)vE)-d>S&HcP>jn)PHK+%g*^^tHh{ zSNg;d4DDnr7XMO&`Q0@o{TLcDq^;C4c#L~~?BHI!Z7WY)5|~N+x#|JG1>$+SGaKmt zTM)m-w(Lo+cByZ?Jiy3ndqAxhe4b^S{50MWNq4wpbQ={{BR61Ho#~`BO8cfs=CEOH zFGZ~AqOc2#hX;EM(}QUzAX`28I1+a}sKHAwO6ClGD zQ@^#1eQzzlIcJ24ns*MG`NOZ!g|a^68Ce9);W7o`7W*J$Jrp;**hPLjgGM1i{L zr8IBX*r9MDM5zyBKCU7`ImnEoyU8yrjEIF0+9)Ab$LvV|>n{?A!`g;E704UI=c;BJta=sa5XW^=mBkyCL}v8b+{saz$(HD5ZR-GmCLEkkwBUO;>^Y= zK9m*tD1ZN@?l!=GM0g+wE3V?KaSqc*H*YebbX*@{bpu*~zTo!!z{1d>4K;Z5XYzRC z4KBq5yk%X#t*mGV5ssnr6=3J2cD;IVy4q0iu8u+Xj;2`_PZMJX^P$HZ_HBYb<*a^- zIl)30(9&|QKC|4WeOONaa1v2|2LE@VZ_BU^`}3shUQnkL_pT<&%{+_w;1r$znqUZ6 zw_O(s5C7YR)L?5dRz^&+`znE;=H(OC0Q@P#r&>F925V*lL&1rF_wp#O5&gFGk}%^i zmmi{YsG*axHGQz%_6m|b5x{t{tG%-@~_`y z8Sd=iQ%yuTb3YD=xjuKamt!6aD_}XKI}UYkn{d~GSKLCw254K8kjjBzD}f`?gQ`$n z5}HvMV5c*5a%2xeVJk?cf^&b#a_g`8o&p6qmCyH48|@ts@~8aJGEw_cz&)U&*XTVL zZR+UBQKVjPF3=6hxAdT3o&w;BLZ{*}5&7ZYLcO(F`uTI2IxMF|v1<53C)%u*X?w5f zCMZ@t(w!GJ{z5aN)j~kf}+emb@-$4VBX3= 
zyoskW`gz%XEy2KoOCZ!=VD*ovz9*r?xakBs8E;9gk)cOf z9jObNu=1v!5oHvf39a{}X&_*d>Y=Dv+JPR(7T;q*9H)R@bA66_^;=tE~pDWLJX)(YZScSt%*NQt!-Fju@Ro53z z6hP%D$vBEEb|He0EiBxr)qshGtz{eJhHiFdAlv04IAHXE-c01nJQDUD;we@wk&o2b z0P;86hf3{{WoVfdWFI7}+*N~j>j@?DCoo-zT0ZYC!lCpyyLfPnL@h(pve^jlI@4fy z{ack0jmi*Ku-keBr=5PgpwOeIRrVm8ck@e~_pY&ZSo%NU9tl6Am%?U4T(7kxlk0$Q z)AzZqWNmVoz=rrnM{y6%m=So!w1snz(tiHbEw}^%qRRIw8>8}isjI`d9DjjL+JG2) zziYAs2Bsa8WoU#W5J66ejUeaH0}oKg04ap{^yoON)1J`^Sj%3JL8I08P_P`Ekd^9R z86b^}v(`O1Pp>$YDikbryRk@kTNOWt+=MlVLsGnO7*&0cKOO^w}_ilOfpaL>Li zt~jSGvS`we+w}denjQl^t(>sjDL#j+Vi)1-&(uJqtZWJS)d~O_X;}X@WS??l3Eelm z@UF*_($*?ijEp3{`wp%2N8{-m#n*H?Bm$)-E9(|O!Hc29?`I)`#7` zjf3)%NZGh?YT%r9kquuY4#5)NTn>q9m<4eKjn0=|lf=PS6^7!AkNE<20f#5nAkvB2 zvc=D)%8npYVi4Su>b9WlEk|Kus>mUhe~A9|mi=7*6mqvCVY$b($i%k)M?dIxe8Ovo zRSfP1!sxtabnIk-Cx4(Pf;2-#>uZ6Ez^=?(3|kb351R6YIM`6Xjde{1U!zVtpoe-b zhkw=7T-zJ19#Nyt zs|3C>i!ZVFD5(IaA^5_Ab!-CY{UbJpx!24r>e?XeHt=vg@z=}!A_;g?whqSaPm7@e zEvT^dSOuNTWFSlKFX+r(GD$4tYh&#i7=ryeYO0fp%OBoUEkewD1bX7FY| zc~`C)%l-3Pg9MboYrK{l0V5TY)JHmQWRR7E`{W^8ark=YWW6lAeRJ-;wXxK0Xq)Jhbz0lJiwAn&*FGFeoHKXAjmO8CQ zQyRZ)Uj1ej|77iWJ2LO@GREfLOP5Y}&vP0>S}2qjB&U#E>;ep^QcY^}BC+|zkuO%V zi@zr9L)?U&B2U?{Jxn80qf7n_)LdaBUkp7XvD@sO2O74&gfN93S-_mUkNPN!M?QIG zABrh^XGzdLVZh!3*dKJf_-mnco0C+8;)Xx2q*($NZ(j?vML=7Jvwap!c70KV-3NoP zMN+*8SD;?c+}~CDG&L_3kI>OLLb!${^~>pMW7g$7ppiOk{J0JpU#Aht1X}#zb*x&I zX4+ZalR&Gmxf}TE=$h2!t_@i@>|HCLFlx|8?_T*$7BW#DEpr6=OG>gD6JB%Yx{O~! 
z;OlLRWRmz+>6y{wst^%aZ)Y!xNk+l%iA!Tkk!nK@?x(K|x>KRmiPq{*RNdw$5uYL8 z#;cXqUatBZVvT0E+p|KDoB#Oj8io!8-4(3U369PLt1vs(QmK?W55ucDANwNA z%NjnG=}gg%@-fL!4Dmv>wvx1W$+nIuMKVOFi7{eKgX4C0|4OF(n?XI|<;<%9+i-uv zw}<)XbU}Bb@Ji1G{1X*j{iW_QQ7Ou!nA#In(R)d+mcWYVkEj4cIcL%!wEUPXB&X|% zRnDUdPMCDHW9CBELl9vH8Nun1nGPsad1H5-1~teB0NH4r2U9^}rZ_f$k=Kq%cDNqH z;}oN!TU*SX@}h$ZU6NqS=ua=y&xpX1YtXxiQ)5*a`Pfhb*aI5r7f>_$n<^YR*U^T2 zw%%W^pazNekt$8U0;;E-@-J^vytDE5MFmZf$o=_D<7&Zp4$@LxM?1E?zfNd{XcXF+;D7hfWcyHEGH|(i+1}_ zf3+zBaWOgB_EJoIw|jad-?xP=nReFca>UTf2q|Tc%k}rm_JHm_vEjfq9vY0rmaAT3${m>syky!%_Eg7Rr*Ux$|Gcdd? zD4h!mqbO*^o6bF~(SXYH&)j~@C>hEDiyf-=6ej0s8z>#NSdj)L9@SD6H1v7Bs{u1N^Lup5}y`G)p z_EZIjZ{mB*vo#`@741lP@M;Nut4cwjd6Px@?nUN%cC=JpG>4%om zG9z8LagC2)x%%}|KR)0A*1z+FI*y~*0%+QJC%PBysw)evlvx&}sm~$xnSu%OX=B3% z+;`X*j(5=ECDw-^KAK7PA(!t@p5Q0A9OM{VQlZD*vud>OK@tAs7oY;~v0uLviCr|) z74ua_<*qtgIC0hYkPiwmIU;eF_(y$V5!85#!k_g7g0t|KI+nZkqIia?JRR4#62+SmQQ&O{P_&W@5m>r&!gqzKcWK^R58 z35lA~JR?Fok;6wYh z3Pj^*316URg;zHdj_K=j+N*PrESyR*TU-e|Sh?i<#FVLvXd4@W*p@(LMQ6m;+-qVQ zw-1hDvLcRdKkJ#3*<;S0*e0ftX8OK}xp|dK9bCemJ-I4m5dQ5L#?9(ob~(`f?54^n z=whIDT#9&N0Ot%eCdCUV%t~Pt3qP$!H1&2DmvBH*&#M+P$+t8*BIvpiWA$bn|JcKG zzD(wpy@bQ%Ss?Y8kTMEGenk9XXg?Fi9Dkb0vjkD57UcRIr2#>M`y^-U-`pmffll)g z;7u}2I)L%HzSl-ZzZuP08A#nARkv%t6JQZEcXvam5h+59kG)_%GDGPdnx1(D7W%v@ z+Dv|JK$-0bysC>G&dM*-^6O8@8~<@aYBEpkV6I`=?;*x7?+S2><4BQECZ7}eb}FE8 zp5_Ii^NP+KfMavfFFpP0d+xblRoLr*G0;9Yl%D1rB3qoRpxz`O6P#$w705f z;_uU$Oq`>1JTFgzfe7k{-e@vwhCIU1)q>BvIZxVqLZ&hH$i{=@!Qm({in!eK3eSR1 zLos9hr!x1*iVPb${*c4BkU?X!I7EYr3;z+QvEmF3efb~?g~EE=yj!~`xGu@_PU$qa zLV3Qx3}#3q<%Xt~YTneh#>$&a&lBNKvOcmpuIdz1%4Stp81hY$j%ZKRLJqB{Y_S

Ahm+nP{*NeshT|%MSzlUFe7dsqgD;hBZ@qZOUTnQ0;+OYpI(#P~**SW<4?n z?o@-t>LUn3Kxt-hKBLcTgnfHDw?8Nc6m5Po`)@S{>^uLq*pSH$y~&rXB>Qs17Cr>z z5YJ)6`-|OPbqtzT@W97yRGUeTUU?b?wvnHURN;=)bB-tg)bo+{zGy^dM zsHrp0c4OjJ!#`416uiU6y+1w=_Fyx}pBd!6dG9=mCDRo)y1kJH>mVuvTjsMFjKl%XdIH^Q8Xg!EXe{<%4I z@VNk-*IRq3^RH(Ds9EXFQ?F`xcS=Mk$YkI?eZ==s-cU3-0pVY7iHOv?GI*ofi z+=BC&4Rg4$h(R)%wxGfH_;T|)UFj+qFiF*L)#Xeq`ah# zios1=M#zx0FnO@41MFPA>48;JE6&0E+)6{u7IGUbj$xyr-Tij9jVo{qu?x`w`9p1s zEV(*g9tHC7iY@vK7_3f2QSD`<_fV964CfED26(2aJl%F6^1}e7KS7}VEe*Dc6iKj9 z1cL!0KfPkcaNC>oFdy zSdo~`^PrfURzuh`eC>Nt%xxj=FpEwA$E&2Ipd3R`1rNXii1OudjCGUt>v@{-h6W{wdrc5!ZMplX#W zv6(+dgFGBkRamhAYkm+tXRf4QV$>gDLL;ofGDuy!NLEGV=~uLCY+bLTEVY^7fX-og zLM7v+fPwtsqLxUPftSN6;N6H{8~N5^F9f}fyL?j?Nllp#%Q>k8?P}~O^HY8bN0xU& z33S*tlYTDKIR%C^`uTT_x=hJA4Q5(V9xf$f+`3MJMfe%7{#k=PjEkOFv?_>CmzUqi zE1U{ZHtJHIG=O23dA;P9D){+-)=6Og zCdbq{zQrQy3k|9xXuRJ#7L=&J*&8M~oCGMUl6Sp^7q55^U8__k_~ z56IeO+O&I^y;VR43)r!UWJfpF(!pJMw%Kmd(IV{34JvI&B5n02_tL2x0@ANdB7bIc;hFM z1w}a%bJX8xEO>0-zIw1+yKD>i4z!b(RVRTE7;DzKrOvtq+i%22&-%(%yb{d@s{ zeLHbTbFe+tg*;Q|iJ-4h(z`ePNZpdGdmsnSefnuKQ!8KDaKJGXd5RJziKZa>6Glq( zhazHy1fZnT&5)HS8e?2Xch|d^eE9&crbF7UZ_IGjp`ScC*Z4-_%HM?f=}2c*MoV;& zzHbln%KRoVoJ>ttOvnH4knDrTQ?s(Z!N`sLv?Sf&_TKTIKBOLwC}o4+FE5zEabCZc zkDS#K9hR7<9GS)5QijAeQ!F|Lzsm`NCEKWvRWD?ZYDjlOjOij=?@O#mS^GaJb##b> z=@E$ZcwIiW;Ty^K)D~$*`WFp<66?>|vr*JMT<|95v?(sPXW)y#Fj1-=pOpE2E*j>e zC|)6L?0to|2{iv8msg23vq?oRF&Gw|*Gh8zp#d6D+ZhsZcxW)cY{dOW8a`ZX2^=lz z3cqlvKtmss$}2Sr68w;6n~>a}u@X7iAuf7Du-{-6v=lJx!Kd<21&}6)D(6hIWzVad z^JNev*Uu?`#4M>SK~53#U<@jQT5 z%luA`5$N?%ijB^R>c_QxAG6x(rQ4R^b}QtE3PJw45!^V3Je9OuJCHHB0<2ea;|2V4 zd!tX;jn9w#59p@+oWW0atPbx^%viyx;5);=35DoUWq+=Mx1mlpB_NZ~4Kyt{T+vFq zQwYdtp%@_pJtnAH-#7@(~d(Ynr;8-Sc8AP{LCn+qj9v7?$<

{Ik{qfVz8sj;5>s}{OU5IIS67*pRK zSp_7>naxY@jKMl`rZaNS^wdTAR0YW8@LgA4L8>G49Q;BNZN`Mgh{cwqSaBd(k8Te5 zhkjeKXy7btzFy%gq1P)1N;e%Yj8g08ey;S7niYf+%W^NgSA6!9+lXjsgmsP|WxYNy z>Ajw*m}xK{tKGZ)2(1P1%}e;nLApHm$`EWZL`63JL_Y368N|LkC%bLoiFMN@CoFPn zwsG4S462f#Rxz?x%AI=aKJ4G$?l-W%qC(DTEK&?w&zkgVdB19GKb{CtWg!9;$d8EsBgC zNwt}t>I7v5#?AhGtM=4k)_KS9q35DYHU@ML-TvZSWOFCESz1!!`*ZXA*Ge9^m^OJS zFd4%&F`{e%ttw?E@`8hlG^cAIyDkBDvBv3)7|CgrnD~t8dps$*j;JJee)mMm&v!)X z6xub)##=7g8|Z`sHy)?b4wcQg0}Ulmus>xs`?>i&lI!UQszKqNqb|e6cAfW}V>H^c zKmy&TdQddh;>?2SOZNi6*syNuf@aO1sVs53XItkG1J0SS7cYaq;d{|{O{nz`bg0TV z1ZB=ZN(t8~nBGQc{|uJWcvtS%4uh(4Yzd|Y z6H*Em70JUF9%RIr9qrP8`vN9c-!;G>?G-eQwWm^_9yzkgNUD*g$IevNn9z<)l^VBE z-Ak)z1~D(g;i;Z_$X_nc9e-D2kXKVWl1i3QIfDLO#j%q(Lk&4f3feTc0pi0}&2(Nw zE0|d=FVbuLhtns3l~a?247%nX`)y=b)zW-q|NadoaL{T)Otwi2l*x{pwUqt0@k6Q>knD3)IYUFe~JAzsQOi=|;>gq5GfeC`51Bydqxj)%UjygX63 zsYWrkk{Xua>cX0EzKByAavR_7oU!PpxEC|{EVqGAmmbe3$jeFU( zZn8a9I*BP$?X))g;wKmEkc`hlD3D4Hc8{*U^(rl@e)ho#2t1H(JJg}B$Du@1%v*RM zj#4if^AL}kQm9CmwW$^GhtZ+D(@G!jtMuEY1z$klUHQp1xQ_Wa?&bu4K zdE}(fu?lpsP$@cwVA@_YTwU}ujk7N%un<_G_K^MU5TeD`B8hjO8^T7vT0BQZ2%3b3 zCb;qDPn!i0I2U@|f>^w=BAeOHC}10aa+mjyNP(n@VhDtuL@M42kRlqm8h1iye948O z5x4-2lkOFjRhvkTEz?JCLc6zp;XxP-T(fOcQ9c46oh9AvRQl>98_IZuobD#U9<0sq zGz%1^XczOaVQ)!|azLT1yBND2CmSd2orB7L=)aiU;+O%15 ziZH_QVGD>NcZLM@;54FYrQWyqEYFhBZt$6d5rAF8&|HklPb2TF2cwxhv6%Le&ThpELSO}UFc87`10L~k-7rgwGTotKnNJ{bR0|3vCwy40QPht= zG~gopsAjKken!#2U-LLUtH;b=;WtFj?&$c3s8+uJofNScH*Mf*BeyDjsyE!v0i2a|fhCz0<&H~W@2V+2i2`i4 z0(i32KU)CpfKR%+ z5x76ZG&qsnbwY(nS`)dlzZZAGogl5+Kv?d9o(=I3YT4+H>JvDr;ZH&VzGK!o(PhCy z7Fi%{CBMbOX!5v%2`{rC!>e9<1yhAyIhwDf_dXP_Y43T+z3Kuq^D&rRLq)KA6Z4;R zS~$-hUh*DjZa5uZsiCN}gLmnEb*Yik=zs5&tp){1z+Pb=Qy+WaQgxZB!R)4izaW3) z&d`fC2_lJhz(V}A6?Z5U;KcTmtjGd0P*04RQecfl@OIHO#WGjk6$FVUXSk9kIKjmDR?Dm`B}-fysZ(50UA><(GMbG8%J{@m*k?4x#zz!z?wA4;HRF(-Kvj{wp=YrgXAxGWA~ z(*e_RNN|Tf$s0s<`d}zI5kB`)g687ny!}tKZU-;oFCpj?S*D;@2*76@Or{g+T(iY;}N_xf-fo-fV6M5 zY}w>*-M3xHwV9Yz&KF4Z@~-|^j?3gLJ9g(%Ibx9Oi>eL=@?-LDG_8h|`+mRj7H&El 
z=wp<2>q%y7#YdY3Af?7SB-wuiwnP$?=oe0T0=ANPiq}&Nh-dg%q1JOnM_WW1P;M#( zUF%UL??0=DZ|JJ7TSA#tgk1Q`RZCT@t&D@n&9&WwPbF|7VX&3eARZ#ApMwHId>$C* zu>`}O7RbCqaOBekutPm;l^JwLVwb6d=AnGz+ggJ7VE_C@;q0Qk^V!a&$rfG_A?4i!^-9HAGtt{lO`0_Tu<3lk*-KZBe=7 zTAr8L4Oj*yR=fUM>VQQfYYJ#DZmsM+EQ+YhN1uSO>87R`P$`^P$V-gpD&$)@?K>hq zWom_JV%U%uc0!KPg>}w0yfQ0HuCdWBnpy+XEUi`bUJ&))*qJ-FgGoRwS&`*io;?IW z`^x;>Rsdy~Ok9eft>igMf&EmMP4X_*9nznJhtK{_zjqQyIk2@v1LT8fo%(i%Y2`?t zg?M<=(^n#0;Yt-eLiu)p{gH;Bxc^X*Ls(H?2Z znU$ki!3tCyt<2`tI}nUF8)he@T4Tg4x8>eo$FUbl{S6(`)w(<)uWx zGK!6GSB2FcGz?K*DNJY~9B=RfS4-oQ?|d}osirmEwWC@ygiz-PjQcyK^}I5b z4Z-5I6x`5(6G6ICUbST?gZvUNzskn@_ev0Rd;PS4kegq$AVi!pN?1KSSicJyc6=2v zxEBHe8JPc__@m)F5AQUJZ!DdDLWY|NXNbVql;T5wRprKs6i~27Sf9i1Q_xd~cHs^d znkYuyC0;m%7@}F+yRM+pUtgVkuBmS(ZcFrJ^%E3LwPZ64EzD#&vAF`=wPs@1dk|7s z4hAUG=@UyS{2IlsX+)`E$-LvXSGx? zh2*`&d%1a(+Z>SW@?*UCu+sa$s9z(yyde!*b5|{%P3T%~2aHehBEsQxUXgN=qmS3s zH$cRAG9h5K$~$F~=Ix)x!)5(N#nu>gU^~nHnOzk9&M4Ao2L1v8W$J5W)y=IuYYcZH zoN*it`J-0b)RO}Lx5$wP7BtrH%z&#IKMVcyD(OI%AV?t zmsh*Dypy3Y<&e-X)e#;6fzo)}ephZ%$67=#KWqdV8EKXPhFApg*V2_eH_)^D<-W$?eZ-2dAaG4ebS(EW$-Z_kk!P=#-LJI`5SWji( zUeoQ0EyIFE!vl|YY*?^BcD)C380q{)4(B^J(V?Suq0~Z-!@EL~M~zOXO=&#r6e8EY z?&V`ec`=fxpGPfr_g2iqA0WM0+T~6b96#{iE-7^^lzLV5w-Zmr{Mtwh-tp>I0s`pv zxUzGqIL{AwXuV8Za?@t--hS7<&E&n}3iR8jYNFlrHYoKg16FSt;UJ-@xUK8d*S%w=VCyi>+a*{wteOf>qQ{M?klS@^m# zuwl>aE(*8RwjzjUSORB^>gx@cL*dhL^hyrH;`0a9yi&R9A%_4`C%Dz3!-B&)?HTqJ zGJJto1qnr(Ouvb_6l`$ZhLa;Uu>KT8==17|q%%TSZ(G#$1b5UAO{Z5sEeX!wL5JRnC>0x5GbI(0M7%|$&S(iFla9{J&)M`~ zzH-g6U-GUxwmYrQeQe~NY+qA+QvDiQ0CAU3#DL;Cm$V=Uc5(Yd81+E1x#WH;He7s2 zO@mJ3ugk{ko=NqUhSw`dCuQC2oz z1g%uIi`5gd_0ubKQnc3OrSeGD#yGC9SOy5LflmD|eu)qo=OLQUr`SE(Ns;<%L=6|n zXsKuR;%9g#o}bk$;L#jSN#)SMZ%dn=86;^DCM?N$2Dt!=k+?c`xy+e?opWwj_2 zdZNH_ClyFQMAM#*W>9EfKwtxpE6|)qvdgU)8UnkE?U!of5im-R?r98C-^H$q8uM_$*fCphvVIBe!~`(<+;t zLU86cE(Ct}Ka8qP!wp{UPRtx;@i01jwc0I__%D&>@JUQq4czprH=I(bq#-ZtS$SH! 
zqa_@VYH@z)A}uc?dp8NzPciL%RD;|16u0(&CmUb=s&dbtK{D~ad)f8XO`XNPhpbwR z*Rvj34~rk*eZ6hGP-3t`Q*FF_@Zj}Rj4S)-HxiMtX@+cMa!V+JFJMC65pPWw z^}OJs<19Wxo}~WZ+hfyDRipZF+t$b&nrXG@w#KP#4Kcqx(|;OE{4UKHJi1_GB``^F z!XQ0pW^V3dbl>P@b3{MWFhs@yMnp$SlI)p~eSvm~A^7;IAfd~(zNay-f_nzL947NT zCWn_uaVD-aI6Du{zjy9I>jTx53Uy4Z&qQ*PomDtxa<&i^zvVWmHCj#;+RrLH7Are@`1032!NUi=Jn z2;|f96R;xESbKAjUN4VYJ%}0xklDtCf&B^jG!XOhL;+8!sy>7B090?Yyjg2f&!LiS z+>laE+cnXShIf2*Ds8OU3lwr`%73ak>lLJ!Mx4!R8Gsj@_%_SK_r#ZC{D9ay_3nc& z7f)qxTcAX8RUA?sP>?sZzBO77_oO|cJ1L)m#!QPi@``U`Rf&S)TJ0TqjEq-(=m+sd@v~*;N8?^srB^|Md)`H6}4kOd7kt2%N36S>*+%GFVn4U8XWS zL}<_0JZflGC=Ee`y;@lf@MuB>21LJL{QY-Ur0-xJq#OL;GYAEVj=wAS z0x6)gr66*2f5Ri+bFZKL0k4^O)>I}3PR=<{p_itoXn;uM^&zhW75ALicN~({aX&Rn zW#7eu{*`LA^(nh92W$)3CH|#l1o*`Z%xbv~=^{b_Bvji3n;lzr#ZnsBh8{Hz8DN|_ z)il8%+5%{9LiCZ?`;4*hO+tp4gl{Y;dip)2)jdGxa%jltA5NXjHT$z<}0 zQ+<2XO=(nWiSlJCJ4Eo*`mipHTDg2wLR6{B^Watyq_MRCJZU*vTgHp@B1lzf+ZG?Cl1x)^<&ii`Ed?ef|R==1D!vv_JH@ciKFIeO)n z_=0fio%!kpHh7OyH-$aSbx#h zQTt^o_ zcjt|&b66$fUG=!6Ij*!+0O{`0^zxSa#nj7Z8c(;pTuU$9B4vBKHO%hoJtTLUNjovN zCU2SR_bP;E_n!KzB%kWldSMDRgNYgE9aR`*NH$kIQ?KAJx6t`V9tx?}VsUFr^h+d7 z>$yEks(f<>^?IY>L>Imou5V%;&K4a``koPui*c3E8*GkyjQglVI=OR30n{ypYwqKb zuC7h|)-|vcD{X#q*E&;IhSoEFd-ft-oBpqk@3`(6@6D7frw_M3Mb#N*u8_HHY4(ru zYL6>4vXxJA7{2*!i&G!v;C3bNCDa+1+$$}%6UaL9V|M2Z^63<&7zG-7cLxXLPlXGY zSp!cio`+X;?{_mQB_+Zd^OavCOCfj zi@wL$YCb*ZZ~iiGsUBFMJMKoO5B6ED>;!oCyL_*;_$cALKGl70QBjy*-tzB^Ih~^y z+1+aM@e=RsvA^2rwAg-b`Uj)_MDwMgtJh9rC#qyQ3XDXuGL9bhIG>_OWhYQecHsJA zS#yp34)NG=72t~}&a(G5nu{s`h(**7{ zaejQq><5rNay$o?3J`RG0SdwxS9yde*=u`*=$bYVk96BJjESwWle43Vfz3aZouMTR zCli1X@DIVl!p!`CLFWH}?CgweT>lqk`47s*%J^T5ne#swFE4;W*~8uhz#wL4>kME} zaxrxN7yA#$1Yi&{a5DLKiQqq*9F4S$jGe6=7oDPsnTxf7x|-w6J%!bA;h! 
z{VQ1!z#wXCWM^z)YX)GDF)&iJvoWy!A4JT;(aBlZ{O{eY>;ML7ga0s0OdJ3PH49^B zb0>fnfQ^|A=I`-W+P}|?f2n`(|M34k{)_!Tt}y?P@_$MGpVt0I`u|Y>NchYBt55$$ z|GECVfBzfN0r2zxSJD4c$3J%Z|0?=_6!vcg${N`GHNpR{imeUIoB%8=|E?<}Wakc} zMaRm?44`9SZf5YN{zw-YY(fwbR0WdMM z{JVr%AX&II6O|5q)BfAh=$PLBW0vj8~%amT+- z^Y3y13llpt*WX4gtes8%@i1!xXA=>Vzy4$b^H;O~c@f-!n>t;T(brMg;l$ms-{t_ix9mSmqvJ$|8@$m@2r^DALKG%<5SMOi%bju)lYmH{P5#44J8uAI{mW&Y^ zfs+CAfS(-X?53)|{)LGsqsT)e#RyHVtsG9kbu5hyj>L)z5^X?Jo7RG{iKezce1YP! z;D7ucfbT-B>K`8u6910>tuD%AdCph2;nK0W`)s zh`C@akW+q4MmIW;G59Rl8Auar@gf@#^BJ{f1&;Hi{vghktSlhydr(wJSV<}r89-H9 zMFkFI$pT{B#LDXDvjWB)_^QPMlFZhB-uvN??q{QxIFGilsFY%8^okDZ55EiS>;OLU z<>ycY!2TDR!vEJ0R;d@GMVR#vR7uvBN z)ZecI=rW)eb^pp7TJ~2K{0#$WuO*0lQ!@~E2Z&#NBE)C&36LzAJF5EC6C#s6@uu%u z)3+4!5z&`@79Nm4jx$S%HT9_2)!xdNFYu zx`#XP#l;1Xs(q3qm!_mY9$sIS4zgc~+&o4$1xI`o$AsU;`ZV&v>B+laM71?OUA6Dw z*8En4=*0g4T0o`0(bm}=C?^T{OBpOe{3Ejlx&c@LKxY8Z%fg1`m$To!@=MJ2OAKx& ze_tn%6Tr&M-WBL?YXt=VK=gGr^8fB?dG3d+GmlChhNIN(VlpmLNxaZ-6Dx3Xw$_he! z|C8wds+l?1+I#<7@jnVWz+d+Ptun~P!OZ?YYqqY^wq8I>Ra-X;o8LM7D@-{zGjQ9B zIa=ET!4>%(xz?|fWDjmx@U>(6>(&A=v2p!(4SYH*>>Povt^jUuy1@wpf^UHT&ET)H z{vr!tkyck$QP5}jPjr8IlXA2GS=u^U1K7E^0cI{PX5NUb;3i<_;sW@xfxFof==Hlx z04&UoAUCilfRnqMKfns)g7~YF+*|+_v0tLU5I2BD{14&*ut@wtyZ{!-KZp;&BJ~gA zVFi=?gV+EpGJg;|fJOEX;sCJ7{Xv`n7WqF2%thf30&`LPgTP#r{va?HVFWJi^d-W=A!urfw^e?L0~T0e-N09&L0HkqWcGdx#<0a_`sTG ze-PM%nX84Zt%a?Ng}cK)a!xQSGw|i(V(V)6kB|>+*8C3wtC)iakgL6!tIa=BHg<3X z<}PLy;7i=f?O$Rr`QPHd=G))Z(%tY8+ve;~MNS$?0oe&Rb+QM~?7wx`*uWC!e<1fS zL&eAZy&6YYS3U{)OZIFx zA)aKx7CBI)B)B~&hq?r$D9djYkQVG%rb`uuZU3^~kyR&TK&J3;n!lQ3TFMd040ZUUYqij3Q0lqdwov_VcQ7 zl7;9EBa?)iH64?+@q5~UpVDt5Ha%K%&%f!svyu`$?@~K{WzkiKU8JgfWLX;NYyofI zqSqTnSRP^YK_z>)a4){r?(;%F60}P!z<%tZ)G>$`7>h)`?Hy3Y>nxQjSUe>;W8Q2; z7zi8oZvBNaUy!P@TjncK;iRSMoixftUZm096gl3Yd8_^e9}u#%EsxjV%|U?MTDpES@V-BPL8 z-C%z?`iY<;H4?h4p!P+aKQ-o)n8)0mcUHZ-b!*)h$def%=M*Qdnhciqa5jU7#jhb` zQ1Y^nuPdSV;B)oHz7ryu#xBN^G??YkF7Yljd z5T29q>m1lj-6oJ_zQd2&H)@rqQh?J&H{HbJ-GCM8i5R4vWSzR5oV2dEyb3*XV_Z^+ 
z*W^L@x=HCsE^h`xTjXV@ciX7_{utZc_iDd^`ODqVQ{d!c*g3{sBJj)0q!A4c4twJD zzWI_zI>u$t%0bof%dBn62Ay3#!}e{qW)HuF9)%J%*Ah=%yz#uHJcZ#JjkEU`HB2RZ`<#Krx{17e@QZ;UB`qz35?72g~?M56`-Iih@D)?=v zXqyt7Zw_qGNPTe%|BRapS#Fbf(;4#VdX*MFc+Q>p^1$kVJ|QAw>+q!p_k20Zx7amV z2EKWK)(|os-v&KW37txWLARF^m8nB!<2b zbV^7f@tR4S)5ekRD(>D0_R;scD>yC)CHvDfNGFLH^JS8LCCL}0qVAlCEs3{H*9Bw- zMunezI^%@9%Sy^CJ~`IrEtKxR%2XHDXJ*!(#W_% zr9yNwu`x&rNcwy4KUuI&r$X=?AeEnTDn`At_#i9Yrem0)YV5KH^_VtkMzn@QwJ)gjDw1l2-U;2NzT3Pr-q5SUu{UZ(SlUrsrFjqz4`)QL4f*p0IHxEHf*1t@f&w%V6zGW5A2A+ zyz|9QoUAkR=NWedZ-%+hb3~qdT^5eUZ<#6Lsk`*_R5&6V^0qXYfdyvMpTVN*@ZRNYsPD6fiNr8tjcKn$kSml?GX0q zTR0vOxP6UUEoT;HLz8bK3$krTuz;XT+yev8z+_x8Xus#sy-y~C=n9z0g?C)I-Ly{c zcr7RRN&v!$;SGGIS;K5QpgO6Cx;asX?5m z>sut~knnb*fHe#1|(u|{O7&1T%;BE^{YHGH{fx6W~nZul6D8KNi+KhuL!1G zHbNwFx&~$G&YDd{GmejDkJiL~rXA#o&-Yb)Weu6v#>LF5@r&iZd#cFYVwg-IS)!laHS|-hU-`uAT-BustoxGK7U1%emi*cAY z`rl;GFyZXrzwQww5}C97bFp(LKS8q8{3b(@ZL zw()|sKc#I^I35WO4ccaFBAxi$R!-x_eB4S}tB7D1UKs|zXL^VFe8@-IQ24Nvt+UaS zTJqEn`&oPyrLOgamCkc-k9M>Hj*Nj@BztlLw!#(4qlLHX$_sjajkWo`V|m>zZH}49 zdxizUpEt9$5+gj8Xlt)C!pz|Zr4dBSHfF8-hsNh}Wgtd16#N2Pt>Gj^0^=MU?b|bh zY^Y(J8lxtR)l-_PN-xyUL-s}lA7?_B(w2ith%T$TUTF@Dr%hkBW0aL^^owi2r^}7$ zy|Z-t0f)Bhk28gdEp%|{2^CC*_@?*FZz|QF)c)n5n2|buafYf0u(~ofubvnZvu6=x5VVDQGJ3EkUSOk7#L&*a3w^q+~tJhe6UXc6@xb zbdmd*Zc?K~W-<83VB@((d2X|nNlimP*v}Wy>nShnZlc=QDVOw=Iu~{uTRUWv6UBo~ z?u@PPcyJht8GJKGP*b^d;}U-8lPG>HR`w#%ovJ^6B2gOv>b>FMhL}DTe$gmy?_h zZTudBsrF*z=@}%fd**^KB}tyjKNcFo^wKUdhj#4 zX|0LI4LvH%xr&<>vfaYmWnsAa&|mwWB3xsWjp=HJ>Y1$8|9Jg@&k@%TK|0{!0>tAA z>t^{;8qFmZ)!~?QrW|x68DlC+Ab|0;_)uNc`#KHsIW;qu+q!4px!YVyE%>~l{f65ZJ}I_r`=xvQEA>54ZFlLhk|mDIZCc%k)aGm8$dil=#;~pqX1MPTc=E7 z=sCZIvK~OcQ4>YlP@fLU&F+QS8pW0l*=D6Uku3 ze0uq&pRyUG z5uIOu6CwBQL8T3zO2ou!M$N*bx1>?hq#yi+Zilan;~7l8~Yp6GkAqCI)1MJr@7qs@D76=xDPc#_DORech8*2c1P4aSL_k9a1nr>fykxxC-mMij2}L4sn*q00g2j|Y@))=M+J>Ry?B)4tVj;== z;m4%M2wUc70&WZ-9&JA=jhrZwQg-Z-DuV_=+N@i-C6nIN3124z!gt6{%zg{i_t#t4 z$FdqN&j)uwdlKB@veBWGF9eO}cRRBjIfZ$)`sg>?#r`I(YY>Q5`lqL2SvN*;$4}Sz 
zFJpXH3z6Izfd*tZ=|nNvGvdhk`mAzJ1k);x@lbQ^AW*w_9^)DDc01&e+~|r{q`u%( z>NWYvQ!R4OmvLrksW-OYIWs;jE8yQG^g2^LLM206)hg7d{)p{1ZCQPXpKjwvuo|$=W+b>5kP2%fsoA7JS)Yd3l^}1chqC3FI!_sW5LbDN#Y> z{Z+jbVkbkt4GVD(wG{Wh=l0i)of2-LT9O(Y7lc}jDg6mzdjLDB{bChg>BOXFJO++l z;JgLT%9!n;uX*TiU6$FDcfQ6b2T2bGXk z?2P(dmIQ#bpKVZi#ZjZ%H3%a}+m?rQ4cS(N#)~l!yE1a1(jHq3I~h=13CkJlwBW{+ zji;oDFK>qD7(0xF25`g3OYBSK0}nRM&o^wkhf%8->=Ob8+A_Bex?0nx`V%UP6}>j? z0~A@;{OWBIIJ>rSZgY1YmgY_u)Kh(~1gdrw`deH%A0aj=ndo5iW zrpKSDg)Zd2&BMq^nWHfl5Q5rJKl+nJwN8(8&6J;yYwWhQa)dG6RqzYd(lJ?h)J8MX z#9^Zth?~3Whu|u9V}DV_sL?^U+SW?C6&ft0K0dXs=l&vuSCw?@Tq4whip5%fmvCTx zZ5R@hq`6i+?CYrK$O0c}LtjrF&?T@kdAx*M$L_rL4le4#KeWQ&%!KE2?)WH?Ucu}o zD>+`=`_nJFo2*1B7+uqF0yW*9&AIy_LP9j9(GvMV@32=q`5s!9HwBh2LLY6|`(aH& zir!1Jd(S+73_-15LqDR6;C#4wgr%n32zvG2sE~(*c}IKFmg%;Qr1m~Aq6*_f`_5_{ z%@I_Hdf-ty&@?|t4v$GP(1|bu^ML>$d)ooDhR;n1 z<)%jR?VSh~qP#p8SubB+{@IgD09>W5xv|`h>22?=GoAn|%eA&OAL7OK>+S@U$>;hu zD}t=;F2Xr3U^*cokLjt-R+crdp)1#UE7VMkkzLakmMx7vApySSxbiTkR!7ijRc_S- z^l=8m4430Zh$+=id}4(6F?XVgO+;|CknlAngLGyeIePZP*_)WLS1Z}JweQ5Wo}ac7 z_4ov$@@2A_c*yGtH+1Lw_C^K=5{ zT@{WoyB*a=Dy!SV+%nq9^fba$@ML!+K-bNp3^qO!9*n2<7PoNTtt0 z6O*|C7nW^>#Tpwm%Z+Qjk^<2?s-@RQgSF{XQ00$z+4NJ=+A*i76D)AZrX&yhJO&>F zdPY9f45WQm!q61A-;z9cx;%QqcKNgmVnMT|WlIzSWTa=6$B1SByxnjDW--7kX?sYW zDzOY?N6^xX_c03*i9JheG5ZxZ*+Go}VV110Fr?ogsK=Jkga86Z-$xCpIW9N1iL%29 zcpHk{B$#C0BalsQds3tRc*A*dm~o1MAdj_471K(1!LG+kT1P*k%j`p080k7cjE_8w z;zPg6d(xITDJcW=`dWgfC;f%m5$@wnFpkR6C3}BPOI`v33{s=X@idWG`Oo%e=8lo# z&Bif-AK@Qc=e_#sB7&9^++@wTDAa;UUrdx7Zw|6QGiltFboPg2y`vytK@xV)XDC%5 z7gQ1L=c|e)5F1_W)Zp!1jF7}Ku6K{eS9|@wNt@H8)1*^jV|e37NvU;>Da{+kNQ$@) zdE}r#h4&%WQJZCVYO3p=QGM2q$b-FK4^}lUA2(xBfWSt?F9}vpC7$?tHmVEzXU<=#8YZ}mQ|MBoY?1+_zB2rJ33n!lf~Dd7h6wo zT(EA_aw=rjIWqaaCio$>Do^MTUUV=Q7jsjGjTkN|4%zn0`u)Y7IuRWv6{j-lw`Nf~ z@gMb>)&`f*^upR>5;-Q9XZsdHB~!82yM*hcDImp=Cu=K(8*+1jK%$DJ9lg+uH>(BL zB^2RX!ht*t<_0?iV$BD_2sO?$z1B?S!gcgpE07Mc;ZKq 
z;K#G-PWEoBCJ=>-OY=K}er$=+Zr=H$qb8T$3wetdnk+d;^5M^ALL%GnJ5Q`A`pYpK8XK_m(cVRn-z%(!84In6sxx}dwLdRC?qP}E)VwhYpT!ftNM-!)#sL!O)-T>2e1Px!l z#rK~oduGoDs>~o5YoyT*c@7{_*{6ZUQ5%8}BXTurQ8+KS1qD7!IJwtjnzfZ8WBDqt zDB$qtB)z1`94ixXr)#9%`Y5IBQ9yC1pIz?|po^1@vkQJjdDWJuh&(4_QKWj%w$_o! zpYh(1I723H67QBfbb1jajgK6VAO%|&)DQsAFh++vXI(0O!j#Nb)_M1^q}EH_C@1u? zqf)H^6~&R%RYp<#^P8wOqotI)qq4+AOxZZ+iGm*uo&uBQ%~t*DR5Wuw{?aV+!|AMd zh*QT!e%b-d5oU`PRKye9bAl<2Ur(i5r!hknHB_?~5&k)`n$jFe;19^LV}x&PpZSb> zrPWPi*a~--rvM^yN4xaA^Ym*A=e+EG3pr&rv5mSj(w_S7b%_UjkKVaHN+Ib5*hhV` z_0?ba89U2nrm5W3OHUk@OUxwIH~?X^M*a<^@Tyjcif+84aBQ@p&iAfl46nX8n8CU4 z?PSLm4bJY`M3MVF(9G%TlVY(b?f#bZT?;=gKtC;_Ph{MF<{3Dk&qk~6iBjdy_l?X0e`QTyJr}UL8MKaZvoCIR zVzy}wfA-Q6fa8y%8IHV*nH4Yoz>DtR(EpJ=d9NBUafNIU?L6me|9Y{pfDBuSM!}u` zy{+X)lk?-!);h(yP}`U4x?rKtW711Lr#m)~#a!15uhgQPd)FV9b<*S7Gqk_;vTFCc zjXX7S@8$BUwZ`?E7UTEIvULa-WNYg5O**y2sN!8fvMNFI%nVh+acv8bL3+D3)eq@# z)~SmP*A())V;+1l+NimwxV5Ob+|R3C9IPcf*xX&ian_x}J^!?z3~6BUM4#Wq)x+k( zAkMO#BA?Bb>>)~J$A*b~i?L2Q(IRNB)XJz5u1eBAW}i*ibNOxj!2u@rTi}uRRRZF+ zY`{9=>$gna&iaw~NxW7nY$UrULrUp1TkcP+YmDvQN+Nkfs|KhSXS$9n@EDXeSNGK( zWDwcp`$$WhCJ_qu=ACP2aWObC<>4%t=p2s}W zw7@0zj;l1U!!^?IFpKqpWNSb=WqZ=a{RCNBI>wxmwp;G{`KjUquV(hHh)cUZ=v7P1 z=DLvcJf4gpe!j%Va=@SyHFWj0y&CjlP(ja2FU|>vWWHO!Xkhae>R=UOfFEh7KXjCt z*LUGBUJ7tNBsHKp7`FD0;v&r~S*Q&V_PSrET2# z3%m~-RnWvRLB`>B?kf1d$E{8(G$e za~&xn)5<`H!@hFCWz%23KWjUD=10wpa^l5!A%*x%PS9I`CQ#64;wkrbaKS?g*E~~e zbGfcC=Iw9^D!azTLa-dmd`Z<#i0@cg^x&YX*T7WFoR;adWN}(>kDZnp5P`@@3=RXh zD@dTC$IzotZF!XBQ`+&qQi^;eLF2ms^`nxp5UyHvPk%4~S{+rC+b~3HiQn9+IZqSC z8yyjq2Lsz?sdVI=c}yGwm(HzgQ2!sFRj@W;y?uOvW=P{^H1I}zK6{3)mv55Jd3I^Il}WwL z_AK*EOsqqDK@mW5nN4I}2&ZW|F$hqFxfMicq{*9c`S#tr_&cH7aFvN?WU2p0#GBqN|O9hCAxm}&VwonF{J{L63uez(^o)5P-9f8rO=`=$ z-{+NP4k7vVDO%0Q8Lu*3!mZrMm_IzpYcG^BY94q@*s~FbEzCOiOB<$b8qD3azN&%? 
zjZu`Tu#D06iPl29`cZLS*Hm~rp@Y3hK>yCN1xYqq>eQc)GoaFrVnM81@q85b1_c7O z2tTVErepp>qpuaK6BrN~2a`O``ICOQRZrsk7IF5jz&@;L;4?*c*1XC`W?;lRTflZV zCx!gUOMEZ1Qzv3jclmx0^N3YSPj|__rR&{Aycxk&W}wTfwdoXGXxfjOXDc|}g)p+b zK0UWC!8Lb+96_aLHqMiQR~75tycHBMZEzhqXdDs{_K)9C_}Ig&ZaX=v38vVb`TgG> zsn-%gCK|&TepXyMm1AHFBQ+;(tHx*{eM;Rg8L9T1;)wBCV(MmAa4A|&E2W80!wyXu zGRr{ZP8(xY-gpS&g=m>~r=s*T#LI;>NTc?pEQ>sMz4=dzOHJg1|R!PWZ7(j=iMkZMSrfs!q`1DfP$!DA$wgn+-HpDzBK zp+!NsI+ug$`8*S{@B*Ur_IS2@eIi`dt%iWvRRSR-@(Jrb119m+xi%CZ<5$f$4-oCo?v@|-sPKbebOrEvU6*-IzD5)rL^&+l&wrS(>(Roi zdOENlnfMk~@1fEW__Zi=hyY}rIn0fL-%ezfwDsY0W)|?SyZ4p7@acn9+9teg3U2}HT9;{<$>)!XGi8=qTq@0Eut+368j#B9Pv4+&b{U|pV| z^^m-f=X=9^OHN(df}0PKd_1D|ykE6e8V~zUvAF zGT+%Y?d0x@W}_tx*|_~ZqKk|mTgb#u2XyQr0`G56{P}XXA1_>klY0WGq#Mbj5Q|q* zU5d!l#(U@)Mtps1*5DO;v=3aNL^*5eX+qyOPa9mnYe(m}x~eVg&ANj_@L19e|0-V; zM=`+C;=+je%KB$=@+gsm9^@&!NM`e)#9~QtOvaB7A2k~Sk97(t$4G97QfiwZ1G1T&sIQ&a>JA0Ag-sIx{pi%QS<4JSfU1p5{h8 zKZ@8kn2V<8{HV|}c2SG~P`|DV7j=za^7=X#+V(+r5@i7h>Tv-M{${6V4LwBcCdPMy z2yfX;<;62tZr5AQN?hoJ5o@26uozKFMlaPSxx~A<(fk;z8)jn~D&r?giVVA;a=DFn zX^Wo}73^rzz9_8MV~?_(JSw3oL5t1_P4L%G;qI;+#B)@q0!y1NQ|syWjyV7fgc`Z} z-)B;~XE5J#n($cH?py>TZM^Fc)U-+`XvC~gM`_}RH%807+xpL~tQu?{C6AL_t(P!o zFEach zR1PX07Tw$u87`X(5I0?SaBPnbF&gZ;KT8MFXKwFr#+2^T@%SOn&GuuGRZrGO zn=A0d5tm|$k17d!JIr^3gk41z#POp5>t*sb`$|JgZ?TBXgvv9XJV89RcO4iLTX#!2~j;2TXrA$k%={fjt zJsg!Z;C8%swivK@2e){mNUY!VQUq^VlGvN!m#b=vQXlmMC(C5Ib~9m4o1pB47V&!2 zEjJaG-7qNI2jL0$JcL=qrnT(DcxI`f-_%GE>$ix}mQZhnpw#F_guOHM6K~ zi?8j0IVryBh1XI7eqZQl1rO0I0gO?gYNa1vJ5ldQd@4*k>`RDn!es|8*a!^{UCMW- z6_ju5o{q6*6Y^s?4IfM`!@o&8Q?(rn=ACJyL&6YHU*bDbzMIY!NzQ@cuJz5MjR|OU zv^XNkyiz!(G^91aC9bsFhxw_aDm&>B+kVbF@O`Ar|DsiGu-qG4yJ5w-iY7`e^b076 z9UDN;YJP#I%c$3!A*`I=@U>dx; z;;zEMIY=ZOzPluwU3M<^G%;aJp6C|u3b5*8W)|sXrDAX1z?qk{yO?H6FrkHdW z7Iefyo9VBjugoC^J2y4Rf~(V#^$1H(gvLpXRj2iOp+}ip-F634XE%!4^lO}z z--NkndI+*s?bDLtZWho?S0n}c zuQlh&+rLIpQQU{}NgWJU!vw-pf`Uly1!+-dRZ`!k5}2np=#QkKWV9;l85RfLv%eh+ zHD$zJO1|chTM}@oDu_W^cim_odc6>?Hok-XX)`HoGz1qY3`uIZjxo(`F%6j}upi;< 
z{L?-lR7QNzUO;zWfq(XxfhKaq;B{bIrQEYGj6(t;4#uMS4o~K>#Kcm{R%EYWO+Z{N zNlwc(Ic9lx|J0Q&lPSTvQ7D%1kJ8zf$6VBQi{(WN1@|m&majS2iM9JWu+uLp1n*}H z$~_SnhCe5OV4z?iiWQqPK2kFyA%Xv}fp^RB>pbd5XlnCD%8d1D;u3nUr9iUHbv%d2 zkMP4WlpS?jlm#D`1g>fDC=k1e1`w6EmtXgfp1^d!s$#JtnKQCq{UBGL+*RI%wogRz zB+XYxzkY;#DnHvswTV)@S1EIZn&E3W5H`)x@GK;&Wj2%=AvSW-c6RMVdEFb>T9b=r zJzrcSvti-vJn_SJiQ#tLc*wxA@LV_Dta*T5BF`?Su}%Hay;9_{39>-#+B|0RNPQD} z?nfmwK5jWyn5ye-eFNkv!cfM6?bh4xv;-Wr0>{$VYBHSP9)kN;jiRk;oCz$)X*s=m z1&>taCMSGR-_mCo3Pko<>lC9gJ7%A|=vDA{t+GZBVEC6UCk_0iHH`=q-Xrz8Ui0K70XU47E)F-P4$(QCoa zYBa>)(=pjeHaYdc57_eOrW=J`MGiFJo8_bZ8jgnC%H0Qb=u!6)>=4r)tKV@G9C*Eu z9O`*m+*3_M>;1H^Bj)%1+>{sfNQ=SjBcy(FPB!EiF~O-x_inxrV&)t6i=IQ7avm=c zL1TD26&Cn{ojA6^yqmd^HdU?sO;T*Oa}O`+kw*@@Q!dZ`SWL0dXcCk9>mkZVC<6vK_^ z_Dsf7-jncJ`h{mvoG~S0j*I-)quzjpoLk^jzIZu>znMzpjlj#F#gYiQ>~N38So&QW%#N}IieN$tDOF-q%dP%oYu zXbG{incR+dm5)?>=qeXgQM;$C&L<}Vgc26Q&Y6>hp-xwOC-B{MyUv-7Gn&-7xsMt(4)2!x38K{CF>$D&|&f^RYO%BSK{7+=;w|&G74^xEplH z7asxOjkMtj1s3NAlLbqv`zAda9$WGkxp=`fZ))-+vAz!eyCVi`U`}C)Yl@hAH*59d zA_^$9G~mt(75v|wy3^^Mu;#Dj39rzXN~8C;ukGjd8fX?DtbW!%UOukSGYJwJLY%(9 zjy~QxCyA*W`oE8xj)Ld$^0HE@kwZ4Yj9|Lops+1K)$Y!$TRO(kdW>6EEm)a~Ml>C9 zzVl-B1D~>(W9=JT&VKq+<|WZ83{c{2c12mMM?&O6tT@z}twJ=pF$ED!KARsh2uCzc}7|$|0=E z8sRr^%w6*2C~p#xGFkWUv_>-ecRm(cj8~`EuXYR$6|jw(HyseYK`-<{8f?0GZyv0z z6filTh;pW@6Y@qQ@I#JCbgXXYZSPoF3cLW_9nbbFx||D3onUpAEd8P2E5-YIx^7{m zwp_v1pY21^#)Cx`SH(hWU7aF%QX_90Q|s%-r@6f4qsT};f`~w-&8j@D7|UC%L%Olt zQ?DKnVUfmpxZdcDfbPmXl1qr7-Ts;{v8IGPIke8pJUu%_Cs5(6|Uyjj3^i`+kP9 z(Mn%;(Z>t0$X`LTnuZ-Reh&+{U2b>YM-G_(tYosD5A!gmRGU?<`}y19 z^-Gsz%&~KdOUFWM{h3Qi6^KQHJ(wRj=0v)><=N#6`PsqJuJ9OTSjFplBmUjmJVL9@ zbzT6?WSC$QgBqs~1XC7HXBnIv0>74exV={`o9L_h(cQ)Fj>4j)V$p=xC7UO29urk3 zI6bV@8ijn+Lz$|!9roby3m61AjToHAzQZByL?Gy;dc-|iCwOq&&D3S~C0VjI zv;;D`gz~Arpp4i=$G^hMHR)(zl%c9MmB+p6ACsH%6d{@+zE*1>-*~HH>NzWvFO3{+ z(!82+)u=^^F1XI2MlSHk%}aN~N3HP=D{*0z%SF-ats5t&l*R`>gH2AzaqqZ}5CiJ_ zylR&Umk|#Fm-qptI7X`aH=9YT8DH16ZXwGADBOQm<0b>9o9=AW27)y|*7OSJA6mF| 
z`NjnZG9bM_i|)JIUzrpdq>E?KB*#m`6~358IL;R)-AoY-AszQm*NA|Lr$5Z6wz6CYfJoFY>ds3LgfKNM-_a3dIJy56 z{^1|2s}!6>xFxq2W8rxJjQK@wlpL=}rgTWgbFKD4yU{60FP7=ivl|n8IWe*F%sFzs z-*7Br*y5|gR}oppI39k&g(4Pgwfk)%=YoXmpNVT{1fhP=GGw}?v_{B7T+J|7`s3hF z&EoMDrX{|#b#-^WZ{guXHh9g&zWD5g*aDA7B)G84;@$4vKlPCU+KKx68O>Uzn6q$Q zNKpnPPX{R17Ud1EoE=0c2KL(4O2Y0IsM{;VqA%6{pupxb6sm(kw&>JJfUx5RZkI|+ zq)=PJ6qHGZ9j_fla|r9<=;+9o=#FlOd=XA#H(5;|Cqc%K@s?Ei`nllc)6wvWP!Xzg z&){HxC4Ykr<#YC0G@gQ%1EPI?X7;<_I`QH6Fd6Mr9yLp8Y*omJ~Y?b$0W$<6ehqt&)W@^r!P8&>%4Ht1guYhe^3oM^I7gnkGdYIU?y*9 zMpW3$DqG)D)kn~vl^*4|FqVp_(#t8qu?lQpk5H+1X#Ap$s&v6cGW_^aPOapSfRt41 zhbjowb2va-ko)c614-&T_6Jg812GQMedv0TzKNd%TOBKcJMshUm+#b_QYHZGKWz<1h4XJFuJdW==8Tlc|Rl ze2Y<5vp(F=Z~DwBUAuOl#s6b<@{}WgyZ(-FMp7NlXfLDKLb8%O`53mGi22(=sL8d8 z`1bCnnVdw&1Ncd~T`JBrjF!nwd`c-dVOV*M((thdW(3paZm!IL;iso>&7}S8Drl^> zVnuBu6&mZow@!jvl}*X>IqJIIHP<)ai5!tRjjam2SETTRnP(y;By(ShT59)<(84i+ z{{TUAW{)Ed!cdUj?(tbZRLWQSjhNzo`#5wV+o#Q(62=O&whcF3ee@XTK|(7{n2OFBJD%S^-IMiyg7~a=SuV!SuWRwGRQrJsKszDM#eirq`;3 zRDi$TaGoPzXv=3y5>GhNENpa(86mjZ+87qoh2{x1-VSZ7W~#S{aRd@dBd>|DkbQ8n zcHZ=nBC4JC+_Moe>s>{;RqfAf9a-N6y{>dPF&PF}BQ>M&p_H|fV+L8cW1hQ3!XUPm zPWjHEVLU{!4j{gbE-~c~SWFpm^e=}8JP`9{FL6O?7G-JvdzJFH54INjKI5fxSv<)a zgWpjP*2XnY^ebz;`}2zWCLE3G#O~EIT-Bm zzWPj;d8s8a5{7$`WM<7Mkit=JIN^9Lo$a;_6o~n@ARElgguTn5hrIxO7=-uDahgrc z7=X?$aND6Mjm&lIZfCl8j`zfwbq?Q&mwKU`W^;AExFcAVz+GR4&nLKN3)-2P3&L~n zC%PGkVfT3xifsP?cXF|rPI~d>C2By!rQ+g(k6B35I7*0jGDFgmDRJZ8%8@HfOs+r_ zL2ad9)FXkU#e-b>{wLjpc%%w+ErI4XtQ`8SBlE#0q`XV@+cNLPvHHC);A;-V{Nulzm-n-p()Wn2amR_Q>sEthlt18t;O zwB)Q9Cb_N9WJjwssNJ9!NPXe%*T zx~UR(?cswhv}1k!*idlH?YC;@l`1#Q_V0__bv{6b85l9LcpW0)h5}U{9=|nVlx_Xt zY3wFTNN$w5`SR~q?{aS(4nAtx2@F`sigNbTadx-nLYBTKQa~6Wc#feH8qzV;rdZf4RCii$7N-X|rkOqlpYSY=W)1BEyi-5YO2+{5)ph3)VnHTdZiobqI$GF58 zXN8$`y{js$NlTdf)#oOsy{OW6h8+=o;VU5VQg1u)a};JZG+qan<1cCihf?RTR9GvU z;&PxECEF7-0c&!ZBphuLh)+~%>InGMMhE&0bS7$;hv>7nbyMF+km&Hwq1@oDq&l`# z#J!cSo)Q1Q_TC{@7%uA4ytZw>*S2ljwr$(CZQHhO+qQ3gl}>ul>ExeR>P*iW?ZHXb 
z-fKT=618RA@4m7z&eEQ7aC#Z$-nXc(lOVvxw5|K57k9p8MSss{+m4rdeEr}unnXB! zbKmKGlhDrpy?M0(pVb{~4eRT6=BX!&$m!V%}l?bdQ7X?&{k= zzVn5>bx<7Lw*^X&;O;)SyABZC-95N#aCZ;x?k>UI-Q5Z9uE9c(M}F_#TleF;^t_vxzXo>i4C`$w4F{HTVDW(ri}0XC}|9^C$Nx2>iENZSTmTOt>3Q+d02 zIkZz@5-OqI9CD$OGqRkY{}sd(6H*|+H^sPl>D!I0YuOEktMu)quCHT#I+=IH@0vcT zmt)0?_@Rd{fSFaVuNJHu#Z@T1?%=lvNvAL>Nz|q{VMq`sWM=s)CM}RB_qQx%AXdnK z)Gf&LRbvB8?)M%HyRMPDUd7hr8DBQO8?P6Fj7e4gsueNOhsgEWoLxn%&e+WUd#X;$=p<&06=uZ`aDbFo@hoXdAG+r)(S4bH>Wa|o^Z69DbxI* zzTw}qM(DzaC}E+n94sg+`(cQY2cR?o8jrlF{>pf~9x=e4Xe znmIR^A(oaw5~6qQfF8fa>T9sCAb=GlKcX@dUjFo0OQ(?P8%yXfOFOY_Xp7gLS+(!b ziffJd;Ooj(^L^vKz@IOyK0w=I*;wsAz+n#c4sR~II=WGA1E)PC2&M9YL@r*fxpUPPGSC1D|PO29Dh`C~9DHu>)meJs@R4BbFLO&N2s(ifFt%jlc<}V>Jq0lpF@J7KR#M zY)Ld;G1Fn4?#$x>hUBh34tMbiuU_WxIeh=2IvaPOP@}O=jlk-Fp*<^1(RJirVAJ$h zk((do57O^n64!Q{OoX!_WrKwrxzs4m#`_YfeUFpRhVoYHw{DLkQL`VcGpB@t^42>S z1kdpxw2!_B=cF$VVx_Aj%5%DNT_I^JbIS{__sSCOy~4sH5YP5Rwo0RgdMoNE9-Nb- z%L7>#@8Ssa5;@8oGvn4}-8e0UxiS(_JT9T*o>)^;o3S-M;or~|c#A9d$zXoUmFU^( zThefxvD1&A?d_AQ)g@DBZGx`Z)AYjG^HBe3QVAM^T~5wc29}Civf*wxR4asXI3G>> z8S3D9W&wZ8af4(%Rp0m%UdE-NstMv+Bl()Ue_AO|wTSTYL}=c~3o%p~90!gQSz0nn z|E(A=8OU0HNAb%pw!3G?uPfB8IorO+Q3w68Z=4#W`KbtQrihrl;*Ey-VaB(VUlGV= zwbpOVc4w01f}G27eaj%)XNzOP9&K3?s28vG&$#19!y;_7OLv7!9bBK zm53dD&fqW3VcQw`+gs3f8c}c*t8t8t|8r9hgkY~JeT^iSzUE_u^CCC(`G6DVZ+9iQ z(TBx!R=e6m(WLbhxo-F7L9o> z?S)Us`048v?v7i1UrSx?WWvKiO73EM* zP7mnQcpRY`0yA&@Q4Pb#Bw%gVDbJ`Z_SJ6BtAXFaVN#$v{X1o|b)tIzO?_`vnb}F4 zgUqU?>2GI)qJWTZBU8Ocad0=Ga%H5cvJDR@?AmdR4><>^Xd#1z#K+WF?|&=e-3e#i zh|rWDTVfSiEmvWAe?sU z{C*Hi_57847)OvZ)vLU{0jxPype>XG8|bQXR7c5 z2h)MC8KSe|N-_?_S%_Ap6C?zg>m1#{v_MNlWYmQk-(}J?O3Rh5IT9GfZXX`Fl6VK2e=i1D`Pgp$jPyb9?!5r zvz7EYAT;@r+^X2?8CsMkp|y4ER2ckmG(6%&RwxwhORez=Thjo|bP)3aT_w;!q#!pT zbcDEL<$}i7}j)(kKx?dY2$8rwqT5 zQXkc=)&nYrvFmfI^JJ93_w0gne!7LLhF=>A7t2^lr%FX+0!U%UIKIa?X++GZ_tufHF+`t-x?yvNEXzhL~jGhY27!P$%PM55oPE=a& zD`QR6>^G*)53({C!pTT67^6p;!b`@`@E%%s7HwQ{-|*cwkj_V4ZhzC~3kavtnKQ0g z$TX$&<@@}$g&NJQ*moXSSXTne>Ey%yC3QwWEfE<^G@ugMg~5ET#AVh%@SR%NGu)|t 
z&8Z+RzmUcE%%D-lrR)2jbJ|L%!b`?suVG z!$PM;w2SVM)DFvfWM9tB-3Z&+WQr2qo6wwCoFbA5^k=we)0t$VNh03BVRFDwmN-^* zP9IW@5v{VQXA-8~jP;(IF*xV$u22;S_L^}$f__YY!-Fyf91zC&H@wg^fUY zW^N!3qNNfRv10M_Y0+4pCqRh(+WvJ(pzb~pAGh^;VFsj@2MS-)nDw;A^7VGn%ph+C zXy7s(1%k`Qm*{RVS%T4XO`~g3wE~V1WWREFa~8yOkv!=h2mZ3#Jo?O@sxCBeq?_60 z_PsY>W95}Z@d zZVDu6KY^gD>2n99knf=mzo$QCy@b2!=O_^gpN{WPl0B^V^_J(W)w&EkW-G!>#Pux! zgm}pfjnF+?wT#Yc&bbJ_%<9)!vAtb^xEZ`}%aA4oSP_PVnvurrGFPTtSD$d{#w>pO zI|`Xdwh5h0aVb!99rn9ua-$ zWz`(dWXINUqrIb35?&^82lO(>fI~LbIaTu)PsNZ4$R2Sn(qXrJYTLSIy$aZa&(zCu z1xnK$ZpG!EqAd&5Vrvj*khEzReR`e7iy2-kO|he<%2sa|yotF|p^m1KT)^fXALonI z!^V;C>ipwa_K4*iYGIX*g$Q_ar~Ve#v|+P?GPC(#T6m4p2{#YvaTARDJ^_cNGz_W8 zn$#e1+%uie{6lmyl}*XAoRRqIlCj`S9rN}LKM|R50N&^y`7i>Lvoij-J>z?K>dUS%D zP!pq?ovZ5Z3kwA1&IFT{k4=GUFufZ+nu}PkFO8E+{k}h2h`?cSc1r-(>{goMp|1Bo zf1}{wdV8HZe#U6^ z@sPgy!7bRL*wo!}KOtCzS?-|sF-oojHX6`|Jo6gdRuxxf=BGc`buuWze5OM_E{zGg zROQx_X7-KjHlVDv#9R1O|C6Q#?!+0pF^uqr$1uRS2bguclpyLl`07FW+W}5e942aD z43g9qPlJXDQ>r_$Ratk?Psq+wdjt>b&Huj89P+xfY0yF|7$&WV9jT*EGVPUe^cC6d zFai9wlbZVbmvd4^>cniEhjVqTHJoxHJlf#L?TE4T$Er^wY3n@Y(TOrhfwUUuBe}5X zj$r!-VEs}?M~t%WH<)QgIu@B?_@L^p*IoO}Jc>60J`F38CZ0_-cE@!_Z9=M}oR^JJ z2H|~=^4hs7^w-Hb66qZZ7ge|pcla_5xozFE0;)Xq#?jj37JMnwp2hUvQO%dVt3P3B zGCjp`uei%_PSdb$`=ckLr&_cQB}Mx_c7JAF{aO*Sow|v3ir268Rk%KJ2S4bmB7s13 z{kX>SxU=PL$qUt_SzIcV6x4C4`J{2GtHKdB;R@3{Y`>(JD=NmbO7z(Je9G5@kx!&P zvCx^+ChSjx;8Z}ka=QF2V#fNk%p{M>1T^ck={d2N;X55ontNq+@<1<8u@Lyp3TK|J zAq`$h9{sAKk)&|iQTOMujb&iP2J9bnK`IlBxj7In*fBpBS9u&RdmVfdjQw4{RZkX? zUE$X8+qo)%U6|mNRxAM+j97|6uo(Zk#t7yp{IO+K4v<|G9f2_1*O|%~eMl>~8rFcL z<&LY?pYp0%`gOA2h2O+>(XNEdx~3C`zhDpPoQ_p^F0PJxN-7dM4?Un7s*wgs! 
z3ZpoFkWiUq3@XX$w}u1a!po(H_i?-UopvL|z;LFwnG?!4?S~63Ib|enH#QR`h^x7- z&WidcO7;33DUU?v#3ygmaTI*jpm~Yp;D3zWO#4v!rhjSXzB*c?$yi2a8ql?^enCO~ zu8v_E7Q^${k}R^$p40q|f}?!UFng|yQ8qjNJo-q=D5e}d4Ye+n3)@n26!X}x*M?2F`ZT&C6aOSHcb7uA@-1<7^7RwuX1+d;rTbzMkF^f{sPGp22=jX4 zP`c^o74#Fm(rVjrCq7x$aAXm!F{Z?C$hC5;RMmHqUlCR?Bg1WB^ybjN(MN)A|LNZq zF4>#4N+2@AYD|0?OjOrgyQfK%G1?b?Bb?*eDCu?E={Yu^&a1^(<7Gj7O}~Z@ofLU? zVAHPxwHNsV=^C80&aQ^kW*M-P$bwHus-x2r+i$BG>XJ0UB;)Eu2;hE>dhHY{uzmBK zOt#no+^fNov5ji$h*?~4kfSH?m`d+GP5zY2?gGO|FK4AaOR_?FJ+lAaNWN)zk@yvE z)G$ZYVNMxU6_dL-b@jaTPP*=!Wq<~^fK1H(knu%^{1?ADC=Ba%BSNFOV^dTsUdaGR zD@89LDP8OS+M_rVqYYuBsZPv6neod$ubN9Wb12%aK&=2g{&29p>BBfIE~Ch40kTu{jT9gb-1FNFLve7=7nSbg3l7mT z&0i^m#T>#A;pC(Di{5mgryihleib&Av-ZP|D&k{?_k@s=kMVirM)24#W@?}P*R9uK zY2_ocz)epm#{}KC(bw~0ygE_^S1|UzgNB#!?;J3*f9maSLKlrqhUTnEr=)L|_v*H9 z32_goxK&$c!7fOv!mFzg_GGLV8xqqx`hoSO?Ry5+{`10AG6ms6OV z>cQ~cDzeo>rL4e-%_<-@Q0t7ToB~}UPAK}BO0o&=uwL*BM94bH{3{9LoDq@mzf*^- zBz@%@GF*7~AB>H-gum$0U4mb=Yt1ezC7_qt-Q+qc>tpE;NWH%_H190s&GY_HbSB}?6r(W1n-Zo#Bf^-OCPT&9T9Fz_hel=|&u=t3 zHJ#_N^5V$Q%EY{dVG+EW*f@I(w`1A_W95kOaJkg;%Bu0#KhdSTtngbLZ{Oa;rQOEm zc7H*QE4Ob~D6Y29}-J>#ggrY)w6I%nVBjI}h`rdl48q;wWfQ^48HWdv|sU*vp^8o5!-m>9KRb-Y`gL_!R{TqaLd{e*^eYsN&R8nhlo$MBIVodRs8YFGqV*jM#1 zewlMvq;MRYbcyR-Y<&EjJ?rMTce8OT^swm#8E`iOSuSCKw?c}1?dDN&%Ia#XM4(h0 z5j@edLgIWRht$~jV{@SnIW;0K0L+v_`2fd26+&oP;2c+93)wh;XjM$ zrc||k1)p8s$4|ZdW~@YG0G8Y81mW;jNqw|7-0cWM?VvwRE#xK{ zO7T(Q=dq%4Wd6~5pqY_7M&j1O#3g=eyeoxaou?e&oKWgBj%6CPUE*?OIu=9-xcNeqvyp(JIJX?863}KLpS@Wb|1t>*OHN~0*lSr{D zEq_iIr8nTIDTD>n9$VZ!{TAxw;%ZKY7%lfEVNN6z=@)e?yD8<|gX4$5#zO|KG!R#BHK{_G@4&wddLmNVBc@3sRJGvBXLH;uhtR zZRPb?Ox7CLLjRi!oztC*rzzr-hHCPKeljO^#meSt&Lh6|+SeTX%O$EXlXjK$m9?L& z5X#EhFh%K}o4D7jYxL{7w?D%(aV$gVb%&EAhT7`bpij^iL-b!?Z_cM_#Z(PN)pYuU zwCFw|fe8Sarx}J58KmM*w9bsFf>k0S3WCdBtYg06h$BiA0_-g`GcF8fVVqQ=$yYPU z0l;Lo?Q@3POHD&6kdXGmPkI9kJ!M2^Xtw0|QeJWmmGL$Wz%ymw%s!8Fdf*J1Oqgt*iPaQYi8O^m=jj%XhPQ5Up`kTvFXAXsMSza)w$w0xGp42v)Q`IS>R zkb>k@iM~qVkF&MVv#{;ks+=-PTJXaU_`L}%k7-IS2Hp&*LwKZo_=1^;pfv=6ORl15 
zS9nhlp*ekFh!_(Bn%u~vI=HiQWC55R({5@$f*oA}(y7NWt7^kHOEwwoptc(v&7}K0 zgl{exkQ>!Bf7r|_fT@w3g`(!0#E;A}zcwaR9B7gf#31`7Q?!7;ZjE3*4aa6FIt+`{ znEm#)dSO8mUQ(H8E@0qS=pV)KvDD%3WWj!2SxSi`N^#_vu2Rrq%!vOrOcoevzI5z_ zrcklp54{+Eh&dWK3n31p7{DT|g?zSXW1~MCrZJuPHQ_P8%wl^z=vV_6hmV^}Ze&+j zSC*582X5+ajc>V$r##SKe=@0B?76)ELOPski^I<3%G%RJOZ-3tN`P2V5eRwxZ7XEh z2ct^oa%M5{Hfc&m@GDe!{qa>*3tLwDudw;$WE_y{P^Y4hC&cQd6l6}IdC>hjLeRtS zr3Ry%D0epjs=Q-7VGLM=nVd$IC_GTO@!n*=ByRW9?B4SSB=xFsJSbe<3E`j1PPPv1 zef$iugYw&q9Rh)Z)3raWh0o}+RJORA;^X3Ra=%V#P5GC4`^{k7BIWg#U*=bk>(7;w zZ}{ui{>XF)oi$e;xt?>H-8Y_s(-pAROAmpJb`mujrPH5!6yFPhdX3QdD$h@?(nd$! z)N0JeY~#0o)`@b?kT9g zY@p2G7bm#YXsk?W(hQby-A_T(D;#-M}YPB$)kZfs83(SZbW? z3tL?;%M#k(+^73(pKLfYEUGfXIOxwv+Q3!s>$O6_ukkk$8O%&*Yn#3g)pY{Ey9{^k zyVV%mXpl1SW|Od*gmK! zKRUqyKar+DUX|IEV`ckrv>7Y2&1lT^-71xB>+qPdRx88xZkws>!nKpq`xrq&rrQiR z!?A5^H>3GVn!{})EOoioL%YR2Rrv(jKySoQ+hoG%VdbuNez#kLZeU5%GONQp|Jm}) z4b^!K;81<5ugR3K@@Y&QIKE}K3@3*L=*q>S@Rz+F)y znp-}?#aHN<-CJkO3+JKTT5~UKg%~p0RSFm5R0MBkp_B=_7NMuA9{jbqD&l@Q@1r|+ zBwKzHc58scpK;_K@xwK9kt;o_tyh9oPne7@fcOuodcRlbhgkDei=p|682pnm1nHuF zExF1TaoV^psf}jE`?RqqJ01+r5(NL~cBqS>Yo_I&p%+o#$>!^+ zl^|X$p|hTNty1f}KPKC;{%%~%^~a1o-O_M^4#Rx2O;m9Gge}Y-x$&u9`S#pbnfMElTd{V;250LAm>VSE=tCgSn0Er3iGvhDrUW2o++t*Y;`- zq4aoV6;{zwoBaGgJ%f>%W1{(W)C3d1aqrG@=gxTQr(BoCTzV* zXUNnYLfiuC=FJbvmJz|1b3d)AA}5#uU>t ziy=y`o5?)_d>lqFEdal;VLk|sHdeEuEW#Fe?F8=CCtA@@8wUS|XWf8;{Kcy>(Pr?g zwh~*AT{MfYZCHPy9Jf5j8tn@xYq05TcXcC)kFkmSWMd@|HZ%jCX|5V7oicFq_W6-E z_qNQIjtI~%C_(uK%F}ka(jpy-rx&DeGR0j?nTsnbo*49J|Mw8FqI`#*l=ah}7iJWz zCr)+Kp=Q&iUL_!0i&vv_z3@Kfcsrw1Jg2scBLR*hvv4i4q+XR5lU1r)j#`KL5uO4B z4H3OXRxWfeb}zc;=?Kfpl4AC>4fKU<@u?_hMgwh-RB7CI;a}sia?<;y^#VuZ2`1P} ze6fDQpKggx+SJ+(uS%rqj1P4(UG$smEoZ}1ElT0>ei6M%722WNlYyj6==0obNyn~W z#DcFnBy1FLm2qV-a+0Z*IXONLv`@u7IM%d_e38IQ`LbP4Ba+>K&zW8nRPh@s>7WT2 zrqw3;C{wEf15(#aGe;31)@#*u5L{CX?yny>45Jfw2Gb}AX(ts!pf|b&NoIJIq%kyJ{Gk&HNw=(hx;{2%2|&!*Kejto^LW* z$;j%|nnqb2bxgd^SVu(hwHq1O%(_Vi@?n}HC(rwh7M_+18qZAY{+ooHcI*94n-yT= 
zXH#j76EjXT=jcPuP_dn9VgVujBoTS2LvGxjV1QZQNaPMgE%QS(0+9?i2ce0`6Fl_@|7aSR6S}AbHGyFzc%@8}WbSMM0RC%(A8FLD`~(K>8KY`p5noGIg+*1*6Mm}yNU^2^unnLCtH8z12RQ!;NO zcu2bh$spD3fN#Rm(t8wsZEynNGIL50Fr(b#3UH_kX6pA%pxErZk(7t$W#XC=xbk_z zZN4Wm*VLq_EyLR}$kN)P&S8lIkHbF=D>+~g>Di@Es*D##siQYi=Fc(`;T2Vk6E?5h znnK(6hg`ax%A4!W#LuR**AEqFLj&f zb^jP0cu{i89BEtqxyP!MZFhoH?w)Kv_>2x5v^p+Db~}F{W(8HMI|)|Qv{Ww<(TeDX zNokkA2@uY;zdxlD^u)Koj-~aSb*vp2!O~=>C4JyVLkvsQ@O=_*(qWMy@y9D2PuSBZ z-Uzf*0t-Q$6PTxCx0x{3hLO@~F!E8-(7Io^ig&T%am zVG=()E+YrgAMtWY;ADz_U1V^P&EIE;jQ@%tr@O_8qE|CaFb;qK4MqDr z$3lI3yypQ|iG7o7EV!q~-I-PCtp)*&q7-;CdA?>1zxdtI)J}O4gJ!M+@F_>2HRzCz z)xsqcCxri`bZN8=VgO(FrJiy8BTT`#YM?%+nU8fDlP|EFq^-YPu=f#5A*GuHO7F2q z?Xq@9*WP!p`ay17TakRuF)NIY@qQ-D-H6i|r#gklg|IIM3f#j5Z?$8NjIOArO!LzZ zJ?U~=OolqLU5Uuo)@ED~CYfnTRKjDa+@eJOo}ZN+Qb=xhn2+?(ZJC^l5Slj6aXHh3 z4-aIOs%E5nI?zKZfp#B9#KfT)^;;+GkhW(VH79nm7N?bLuQ&S{=9f#e+$ehW!wgP0 zz>~e!vspB2R2pd5oKXcMl<%Sv=c@#G9D0jmF7eV|7u<~gne6PcUn5#C`-^|deMB~m zVHk193;6vgZV}}(juir~%UQ*v9J8~EEE$MX6wx^^aJn?ZU0I!FG_~tX^<~2rpK+Mx zP4Qla#j|@#4;F00QFIg$+I=b8^~J=zOEJGAnNxyVf2oAt3UotVm*JDs>-S(z-w>$) zaF4d|v=sli8)+{M;R-cw^V`lsQa#B7?Ub2Q3(8xzmH4#_0HW#TOPW+Mf6O0Zt0A>i z56>w~RV{aC*tu*Ue9H~+nLEWtoxrSfPxd73G3gNmC1v*B4Kd%+pE(H}>mCeni=!^l z*2u`g>Qf39ZOwi`blB^g+h^#s42nR_&XM~i;wDF5<-Y6|*SY(JsK=;W{K0$_MiiihfhDAdA5Qod2;|H{TyLKLwx4p%uyp~Cls(12Sm zN_V{}%%MohF;@%$^+>$?)tB5K=tG2YU6cN&BU)r3_nYA$0wd?oBd-Bpt$p zuF8+}07yAqOw#VBydWfnf+w@xcpbJ=L4oK9XB}Jq)HYREqbx$seM8Tut)f*lNB`$m zX9DlJg?$Ee;RZYsi&NFY>g$I#;&6wyLnGkSJO(ODQNxt$Jxs>S&(J4w#A9ibBP~7p z+h_cug?`L;?aSI|%9JBzRYi&Y_?eV4VEX{~>MOsBdLbT~O_!0?bi^aBQ~k=F2$IP4 zymu;Ibfv@IWpj}-r#~jOtNCIcOY-wHS+*J4ZHu~4)3E8N;_8TSu9|K53dSUM2Vm6{ z=Pfg13ZlKajbdAzo!cdz*dnOC^53rPb`1LsyDs&%~bUQ&6 zwv$Y@2VaWm8iZYHBPIL9!Kt4146sx8X%&JjK4A1MX?g*>yMH&`>Ps-{5|lKCAEeN7 zVC}Nl-Eb_tRPN9+d$xuYI%!qQh)%LF>uya^3$1O+C4V8wpp~X^X@-)z-#3f%q{ij& zwk9;ulUq@@Rp7StpP{1t&FIM1W2L@gmc#xi1NSp=fxj2vv)x*ANFrp8!Yss?@z|*nRQEYo!dc2Pf6^QXSb$lA`>wcJIi?mJWoeUeLdFoo<@;kRHGw@)fSE^;6`K 
ztCT*S+(7SsAJhzqZ9T5@BW>?c`BO6WiX2NW+fZ&mm)wLKx!#XR9nVDq6BCPn{@XuW z@HKGn_VYmun0Qfwu3OI}T2Lw}L0K)Ps#^pV4^N(!T^-NBI)Qw5FFeAFok282>t+xf zjA#-Zo=wiO3prC9wpPE&({;9<=-h*N=Fe_aw>fzkgjv7-2x!6Q5a!EuJ{b(%^%};W z%}Yb7$G##%)buS`S7!TT&>&OLbVHj=7pk&nTn+I?OIoQ59^(0@Jd$t1V0ii@(H9{N z7RZI{ly*SYea^NPH|Hm0bTME4wF(yvZ>U#*xiE5d|6|LH?B$fXrPO}^Q@E%K)s7nI z5@H1F)I4YMxew!;hO5IDLO75dipqy;z6|-1^XA~JQFl=(U7eK_+ip+h2_4gvHliT? zLGHiwxD`J`GX*`qgYXhG(M+?11HiB>v z_#i3r1PQXqGZk$QW?)x|)jkm&dK>QaH|3(m07pXC@`N=Zk$GSVWd=gCSF%-VlLOE% zJa9+xYb>M!*B0;WQlWe!!^wvN9-7BJfzsBesU2IX7jkS!Nz)`AvPjS7^G{iJ); zp54M|U^iL*N{V2Os6XK*01;lk3hVCo!CAAQc}3aDe&iJ}@OXuT=#frYg_^~9Pr^5d(%Y8@moZ92U3(UY`0r{y z87yHmJyZ5d!li7#{MU$@>0w2g*tAtU>lFmqg87rhFy1VWa6l&wHo?(XRKU5q*;VyU zS4gfz@@(X{;#OOTz$T#UD4$8-!?$%*_>7;wH%HfrYRN(C;IG`S9Zh+*rer;$<1HS3 zKU8CJ?p8bfpjyTkq`8niO(}_N6vSi@ zicz)es|s)MU_18bho4h(P`h(C8BP0wxIlXS4KIDy4m#Iyc&7h|ogiv4i6p?kfu6lA zcrmR)KN;(;p3N)Nj`3pgigf546fLrg_%f)SUdTJ}i*IA2_tJJhmEk^}(Cx2)*?NfC z)JI~Ce$oawux$;?^WWrH0u(0Y+`XF$ZP^;C(3(5d$G&{U`5|-rJB0$te2Cp5&3Aqz z_lb%m&OaK#OYa*G&U}p+d-#T!`%rs-Cpg!@_Pdxa#I+h2>*&yEN!%_Hhm9i}F949p`l)|&j=S?= z->>WU*4ZV#p?v?xqi6k*u7;=Q5W-<5x@BX^Q{(u=_t!iHknxKfnNI?xlM#|4Sb~Yp z(7*^2MfCjCcd^Eo%Gf1dmJncNN0y=+(@?oRTti19&3VTB$F#b4*<%W48egJWeotRS zMJxt6A1jCS?GFZ+Y71Gcdp+o_`n<&CXIH9BK zT3b4X=vck#Qmet_IAHEU-YoIUuCp~+Dn(k@Jx7@tjJzS(7}&G?q*$e7wqF2^@bk3Z z^p3WgNXYFWfb2|fOU2R4mJv&t*;aw76s;3U7l+0rcQMp&ftK2+nasD(-6jL~9RF@X9dpPeOm&jdlLWjk$?D|4ucDbd9@0TC&BDjC_$FZU+Uo)nwSAs2DNGHuyU_>iri!t^M^Sy~?UOM~1*5H}O{d?KuxgSu(iVp&*H-00eO z!P-hn+Gm7wQNAqYctY5VlUx28qGUx>8%w7&1_>Wz)as=h;#rO({2 zyuhfL26GVJ&`Z&7*I%F$x^YDAC9DT;E}a`VPA`waMaqZ#tX0_G>8zsMz&A8+g-o(8p| zB1(2^yeaS6Z4TH!tQ&Z0e&N!?*CjI_Ak0AcEZ(+*tvplKMwTXUoMrl8Sx-h z-2h950=(p5P^}HtNkrBuu_WpRo3na z$4SWDys3p$^UbttrhG;bl9w~dL3ZwC_!QO`AT@Gio(aJXQhFc@l_4LZBjw8SSo#0Y1q zqdE&xKsXN+4Ku;V0b`o%L|UJ!V07FeyzIC@+|KK`KC?FYIy{dXAd@JJ>os6Y$CqMQ zib5bj&Yu=@eF3s{8F*leYENr%MQ0(hHG-oBt(G%W=?ohP6d?!SW^(#@%MBqp{SyObWncl&{uW<#vJJ{qx;uz9V{(4e1 
zyu-x2^$espO53HfG5wGLi`Gx0t4VNe5J}sw=MvJ(Oj2}Bt5gAyWC`cWo*L*rtawH^ zVBzw2cMR)kE15VA_iN`4V1#9h`j}QNW(Egz(EAE>NM&GV7VIr3KWNSx`d z!WQ)7FX0`ZNVY+J0T_CY9R6(9BdFm%;!M?@qvFDcU+kIIFXg6PgKJiu2dkDEmc~69 zt7@{jC)oopw7t@+%Y8SSofyA?w{xgdToobQbIK&-UjG2{7Q1<~VYe_rl1H$>*)gqT z0*p(CXe|_YRf|xZ*tjF- zQ>C=st!8Zl5>7iCcvZCnZB{k}A%T;78a)V{PMw!aG=tlEBg4ZjRKaxE>TwewvCjfN z$z5?Cd%pU^IK7OyLXLntAX&k$Z+Dr1zOj=S>ocO>9oi|*e&>el>Qi>hMWq>`Jb?IS z%I=RuT_fxGD_-}yU1C{~gm|AU|6#$c(Y%$jJWrh=L*~(S#)Usb?3?Q9iY4LU2&r`E zD$hJ@25NO9oo)6gi1oh5XjpO2KLmA2O2sMJRkFn*Mo&%BXHk*6OIMS7UYLf12nbU0 z{lV(V=sKzp^CT!vN-|wltMzU1)SMFzF&6OEsoOnZKm_$V33={8`2M`g$f;gW?t)y=_9i$pyV4mIlN&g!$#yWk&ktqtCyA&O<@4 z{t?e#f+`ZZOnC(RbzVxb6Xn#6DvI%K$}v~$=u2az|11b0o0}fC^+fcJGukpg++zl zjm?c+9jRa#1e_erY~N2;G;?zR$mqMhk11qttpDzD51^E{H+D8RcKL7UeO%#pi*f*^ zFc&i;GbsJq%5o4gy=0~ui$|K|X(v$4H9 zvYP<@3u9+udN-v1djM?yUyOx?o$cM({U6xhFU-szF?MEV)_-BFtW5vJm{>Sinco-j z@A1E4uycGEkBOC$iR0b${U1CakdyTT_8-EBoLSj9|Ju+0p=ElnOZ;X+d>0_-}S(!OL zj>pQ%#{3}{b`BOcjt{kG=Kykktn+^o`;apS6EhRrhZr18oE&T))|`WrgOTM!tytds zjqO7&?|NZp{BSM{3kS=`T;4G@_K)WRfgGG4YQ^%t=8PZXzGKWEbim5U$j1C3?mNc8 z{$XzK+IW}zKiB9l_OX6{G2jOcv9bf1-n-==>-)~*{J7@ty!YAtllQJQHsFWa|JC2e zHUEo!tTP)6GZW{BxxMo^IsX-dne}50{_2_OLrwo;A7lK**glN+j=hWRpYwRf*n$7T z*qA@|<$KRCvwx`HUySp^n!jVLod4!={9hXXSgXIfXZe`RJI3~}UU+Z9e`D_!^&jTO z0pwu+7w=t@AM5hoTZ|ub=4532s10_`_iM=q{k`|w`_A>xxPN<)>Hok!>gt`x&hc*^ z%g3I1=K&c%#>>u^~x6l6_my?6#gVx@8z>n+8!32CC@xwgcLvVc1ILA8%{BT|S zdo5x5Q2Td`1^A)%?-={XnsUBxfgd&Wj Date: Tue, 23 Jul 2024 14:38:20 -0700 Subject: [PATCH 23/90] add audio streaming & pdf examples (#483) * add audio streaming * Add pdf examples * Update samples/rest/text_generation.sh * remove test.pdf Change-Id: Icadde0849a0d358b605e7cfe6ff208d49d639dfb * use alt=sse for all streaming examples Change-Id: I6ee214edcc06827d1e73f7c1fdd3e380e7988896 --------- Co-authored-by: Mark Daoust --- samples/rest/chat.sh | 4 +- samples/rest/text_generation.sh | 161 +++++++++++++++++++++++++++++++- 2 files changed, 158 insertions(+), 7 deletions(-) diff 
--git a/samples/rest/chat.sh b/samples/rest/chat.sh index d5af4cfb5..78e6f9917 100644 --- a/samples/rest/chat.sh +++ b/samples/rest/chat.sh @@ -25,7 +25,7 @@ curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:ge echo "[START chat_streaming]" # [START chat_streaming] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -53,7 +53,7 @@ else B64FLAGS="-w0" fi -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY \ +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY \ -H 'Content-Type: application/json' \ -X POST \ -d '{ diff --git a/samples/rest/text_generation.sh b/samples/rest/text_generation.sh index fc2d7b9a0..617f7d136 100644 --- a/samples/rest/text_generation.sh +++ b/samples/rest/text_generation.sh @@ -6,8 +6,7 @@ MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) IMG_PATH=${MEDIA_DIR}/organ.jpg AUDIO_PATH=${MEDIA_DIR}/sample.mp3 VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 - -BASE_URL="https://generativelanguage.googleapis.com" +PDF_PATH=${MEDIA_DIR}/test.pdf if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then B64FLAGS="--input" @@ -15,6 +14,8 @@ else B64FLAGS="-w0" fi +BASE_URL="https://generativelanguage.googleapis.com" + echo "[START text_gen_text_only_prompt]" # [START text_gen_text_only_prompt] curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ @@ -57,7 +58,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:g echo "[START text_gen_multimodal_one_image_prompt_streaming]" # [START text_gen_multimodal_one_image_prompt_streaming] -curl 
"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -125,6 +126,54 @@ echo jq ".candidates[].content.parts[].text" response.json # [END text_gen_multimodal_audio] +echo "[START text_gen_multimodal_audio_streaming]" +# [START text_gen_multimodal_audio_streaming] +# Use File API to upload audio data to API request. +MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}") +NUM_BYTES=$(wc -c < "${AUDIO_PATH}") +DISPLAY_NAME=AUDIO + +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Please describe this file."}, + {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo +# [END text_gen_multimodal_audio_streaming] + echo "[START text_gen_multimodal_video_prompt]" # [START text_gen_multimodal_video_prompt] # Use File API to upload audio data to API request. @@ -231,7 +280,7 @@ do state=$(jq ".file.state" file_info.json) done -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ -d '{ @@ -244,4 +293,106 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:s cat response.json echo -# [END text_gen_multimodal_video_prompt_streaming] \ No newline at end of file +# [END text_gen_multimodal_video_prompt_streaming] + +echo "[START text_gen_multimodal_pdf]" +# [START text_gen_multimodal_pdf] +MIME_TYPE=$(file -b --mime-type "${PDF_PATH}") +NUM_BYTES=$(wc -c < "${PDF_PATH}") +DISPLAY_NAME=TEXT + + +echo $MIME_TYPE +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you add a few more lines to this poem?"}, + {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END text_gen_multimodal_pdf] + +echo "[START text_gen_multimodal_pdf_streaming]" +# [START text_gen_multimodal_pdf_streaming] +MIME_TYPE=$(file -b --mime-type "${PDF_PATH}") +NUM_BYTES=$(wc -c < "${PDF_PATH}") +DISPLAY_NAME=TEXT + + +echo $MIME_TYPE +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. 
+curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. +curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you add a few more lines to this poem?"}, + {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo +# [END text_gen_multimodal_pdf_streaming] \ No newline at end of file From b19fc8c0793b1ec1131f21fdb004492f40034be1 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 23 Jul 2024 15:01:49 -0700 Subject: [PATCH 24/90] Add function calling REST example (#443) * Add function calling REST example * Update function calling sample to have file content in bash script * Delete tools.json * Move tools definition into the region tag. 
--------- Co-authored-by: Mark Daoust --- samples/rest/function_calling.sh | 62 ++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 samples/rest/function_calling.sh diff --git a/samples/rest/function_calling.sh b/samples/rest/function_calling.sh new file mode 100644 index 000000000..f88641e81 --- /dev/null +++ b/samples/rest/function_calling.sh @@ -0,0 +1,62 @@ +set -eu + +echo "[START function_calling]" +# [START function_calling] + +cat > tools.json << EOF +{ + "function_declarations": [ + { + "name": "enable_lights", + "description": "Turn on the lighting system.", + "parameters": { "type": "object" } + }, + { + "name": "set_light_color", + "description": "Set the light color. Lights must be enabled for this to work.", + "parameters": { + "type": "object", + "properties": { + "rgb_hex": { + "type": "string", + "description": "The light color as a 6-digit hex string, e.g. ff0000 for red." + } + }, + "required": [ + "rgb_hex" + ] + } + }, + { + "name": "stop_lights", + "description": "Turn off the lighting system.", + "parameters": { "type": "object" } + } + ] +} +EOF + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -d @<(echo ' + { + "system_instruction": { + "parts": { + "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks." + } + }, + "tools": ['$(source "$tools")'], + + "tool_config": { + "function_calling_config": {"mode": "none"} + }, + + "contents": { + "role": "user", + "parts": { + "text": "What can you do?" 
+ } + } + } +') 2>/dev/null |sed -n '/"content"/,/"finishReason"/p' +# [END function_calling] From 032f78f5d10aed5bc7173d92c2873e8a1a016488 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 23 Jul 2024 15:39:14 -0700 Subject: [PATCH 25/90] Adding JSON schema curl samples (#434) * Adding JSON schema curl samples json_no_schema working Fix quoting. * Update samples/rest/controlled_generation.sh * Fix quoting again. * use response_mime_type Change-Id: I29f337ddf8ee9ceff628111a9124cb4e5141706b --------- Co-authored-by: Mark Daoust --- samples/rest/controlled_generation.sh | 44 +++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 samples/rest/controlled_generation.sh diff --git a/samples/rest/controlled_generation.sh b/samples/rest/controlled_generation.sh new file mode 100644 index 000000000..69da2dac7 --- /dev/null +++ b/samples/rest/controlled_generation.sh @@ -0,0 +1,44 @@ +set -eu + +echo "json_controlled_generation" +# [START json_controlled_generation] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{ + "contents": [{ + "parts":[ + {"text": "List 5 popular cookie recipes"} + ] + }], + "generationConfig": { + "response_mime_type": "application/json", + "response_schema": { + "type": "ARRAY", + "items": { + "type": "OBJECT", + "properties": { + "recipe_name": {"type":"STRING"}, + } + } + } + } +}' 2> /dev/null | head +# [END json_controlled_generation] + +echo "json_no_schema" +# [START json_no_schema] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{ + "contents": [{ + "parts":[ + {"text": "List a few popular cookie recipes using this JSON schema: + + Recipe = {\"recipe_name\": str} + Return: list[Recipe]" + } + ] + }], + "generationConfig": { "response_mime_type": "application/json" } +}' 2> 
/dev/null | head +# [END json_no_schema] From 84db0618929158e8ce6d16a9e09f6f319be5292a Mon Sep 17 00:00:00 2001 From: Guillaume Vernade Date: Wed, 24 Jul 2024 15:27:57 +0000 Subject: [PATCH 26/90] Caching cURL sample (#455) * Create, list, get, update, and delete caches * Use cached content * Formatting * Updating region tags * cache_generate_content region tag * Moving the generation in cache_create * Fix Change-Id: If09a99b8f4637bae509b32469b0cd9fd9ec60024 --------- Co-authored-by: Mark Daoust --- samples/rest/cache.sh | 83 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 samples/rest/cache.sh diff --git a/samples/rest/cache.sh b/samples/rest/cache.sh new file mode 100644 index 000000000..218b5e4b1 --- /dev/null +++ b/samples/rest/cache.sh @@ -0,0 +1,83 @@ +set -eu + +if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then + B64FLAGS="--input" +else + B64FLAGS="-w0" +fi + +echo "[START cache_create]" +# [START cache_create] +wget https://storage.googleapis.com/generativeai-downloads/data/a11.txt +echo '{ + "model": "models/gemini-1.5-flash-001", + "contents":[ + { + "parts":[ + { + "inline_data": { + "mime_type":"text/plain", + "data": "'$(base64 $B64FLAGS a11.txt)'" + } + } + ], + "role": "user" + } + ], + "systemInstruction": { + "parts": [ + { + "text": "You are an expert at analyzing transcripts." 
+ } + ] + }, + "ttl": "300s" +}' > request.json + +curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -d @request.json \ + > cache.json + +CACHE_NAME=$(cat cache.json | grep '"name":' | cut -d '"' -f 4 | head -n 1) + +echo "[START cache_generate_content]" +# [START cache_generate_content] +curl -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-001:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{ + "contents": [ + { + "parts":[{ + "text": "Please summarize this transcript" + }], + "role": "user" + }, + ], + "cachedContent": "'$CACHE_NAME'" + }' +# [END cache_generate_content] +# [END cache_create] +rm a11.txt request.json + +echo "[START cache_list]" +# [START cache_list] +curl "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" +# [END cache_list] + +echo "[START cache_get]" +# [START cache_get] +curl "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" +# [END cache_get] + +echo "[START cache_update]" +# [START cache_update] +curl -X PATCH "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -d '{"ttl": "600s"}' +# [END cache_update] + +echo "[START cache_delete]" +# [START cache_delete] +curl -X DELETE "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GOOGLE_API_KEY" +# [END cache_delete] \ No newline at end of file From 87c9b0638220b7c2870676cd1cb38f1f73b0e845 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 24 Jul 2024 10:31:42 -0700 Subject: [PATCH 27/90] rename embeddings -> embed (#487) Change-Id: Ib78ca1d9803759664d652455624e35d8076235b6 --- samples/rest/{embeddings.sh => ebed.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename samples/rest/{embeddings.sh => ebed.sh} (100%) diff --git a/samples/rest/embeddings.sh 
b/samples/rest/ebed.sh similarity index 100% rename from samples/rest/embeddings.sh rename to samples/rest/ebed.sh From d0f3359a93a5192238bbf655ed57b6235913aef2 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 24 Jul 2024 10:41:35 -0700 Subject: [PATCH 28/90] M Change-Id: Ibb18fd6c398f781a58782b7f04ceef5d19553cbc --- samples/rest/{ebed.sh => embed.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename samples/rest/{ebed.sh => embed.sh} (100%) diff --git a/samples/rest/ebed.sh b/samples/rest/embed.sh similarity index 100% rename from samples/rest/ebed.sh rename to samples/rest/embed.sh From 74d67ac153e86cc42ca56f63d3053c6854d2b4f9 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Wed, 31 Jul 2024 15:43:49 -0700 Subject: [PATCH 29/90] Code execution for shell (#491) * Code execution for shell * Update code_execution.sh --------- Co-authored-by: Mark Daoust --- samples/rest/code_execution.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 samples/rest/code_execution.sh diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh new file mode 100644 index 000000000..de231161b --- /dev/null +++ b/samples/rest/code_execution.sh @@ -0,0 +1,15 @@ +set -eu + +echo "[START code_execution_basic]" +# [START code_execution_basic] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d ' {"tools": ["code_execution"], + "contents": { + "parts": + {"text": "What is the sum of the first 50 prime numbers? 
" + "Generate and run code for the calculation, and make sure you get all 50."} + } + }, + }' +# [END code_execution_basic] From a79d2aa02702f4c4e6fc923cdd9cc9832afc2876 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 31 Jul 2024 15:57:24 -0700 Subject: [PATCH 30/90] Fix example (#494) Change-Id: I89d38b2fb38be407583ad1522b8b00a19efed03d --- samples/rest/code_execution.sh | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh index de231161b..494cd2d41 100644 --- a/samples/rest/code_execution.sh +++ b/samples/rest/code_execution.sh @@ -4,12 +4,13 @@ echo "[START code_execution_basic]" # [START code_execution_basic] curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ --d ' {"tools": ["code_execution"], +-d ' { + "tools": [{"code_execution": {}}], "contents": { - "parts": - {"text": "What is the sum of the first 50 prime numbers? " - "Generate and run code for the calculation, and make sure you get all 50."} - } - }, - }' + "parts": { + "text": "What is the sum of the first 50 prime numbers? Generate + and run code for the calculation, and make sure you get all 50." 
+ } + } +}' # [END code_execution_basic] From b5b20ed2e3fb0e41d3b573756920562592095243 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 31 Jul 2024 15:57:42 -0700 Subject: [PATCH 31/90] make cachig visible, and make imports alphabetical (#493) Change-Id: I1a1d2f5f103927e5f50f0f726911599eaeea1a58 --- google/generativeai/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/google/generativeai/__init__.py b/google/generativeai/__init__.py index 4fe362689..19341b625 100644 --- a/google/generativeai/__init__.py +++ b/google/generativeai/__init__.py @@ -42,10 +42,11 @@ from google.generativeai import version +from google.generativeai import caching from google.generativeai import protos from google.generativeai import types -from google.generativeai.types import GenerationConfig +from google.generativeai.client import configure from google.generativeai.discuss import chat from google.generativeai.discuss import chat_async @@ -62,10 +63,6 @@ from google.generativeai.generative_models import GenerativeModel from google.generativeai.generative_models import ChatSession -from google.generativeai.text import generate_text -from google.generativeai.text import generate_embeddings -from google.generativeai.text import count_text_tokens - from google.generativeai.models import list_models from google.generativeai.models import list_tuned_models @@ -80,8 +77,11 @@ from google.generativeai.operations import list_operations from google.generativeai.operations import get_operation +from google.generativeai.text import generate_text +from google.generativeai.text import generate_embeddings +from google.generativeai.text import count_text_tokens -from google.generativeai.client import configure +from google.generativeai.types import GenerationConfig __version__ = version.__version__ From 42d952a2429fa72c0f2617d71eb9aa3f708b6867 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Thu, 1 Aug 2024 19:55:30 -0700 Subject: [PATCH 32/90] Ce rest 
(#495) * Code execution for shell * Update code_execution.sh * Add code execution chat * Tested code execution examples * Use multi-round chat Change-Id: Ib11b69899c136550871c670b947f9cff0f480d6d --------- Co-authored-by: Mark Daoust --- samples/code_execution.py | 1 + samples/rest/code_execution.sh | 54 ++++++++++++++++++++++++++++++---- 2 files changed, 49 insertions(+), 6 deletions(-) diff --git a/samples/code_execution.py b/samples/code_execution.py index 6b5c97dc4..019c6b344 100644 --- a/samples/code_execution.py +++ b/samples/code_execution.py @@ -142,6 +142,7 @@ def test_code_execution_chat(self): # [START code_execution_chat] model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution") chat = model.start_chat() + response = chat.send_message('Can you print "Hello world!"?') response = chat.send_message( ( "What is the sum of the first 50 prime numbers? " diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh index 494cd2d41..e7efe2e1f 100644 --- a/samples/rest/code_execution.sh +++ b/samples/rest/code_execution.sh @@ -4,13 +4,55 @@ echo "[START code_execution_basic]" # [START code_execution_basic] curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ --d ' { - "tools": [{"code_execution": {}}], +-d ' {"tools": [{'code_execution': {}}], "contents": { - "parts": { - "text": "What is the sum of the first 50 prime numbers? Generate - and run code for the calculation, and make sure you get all 50." + "parts": + { + "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50." 
} - } + }, }' # [END code_execution_basic] + +echo "[START code_execution_chat]" +# [START code_execution_chat] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{"tools": [{'code_execution': {}}], + "contents": [ + { + "role": "user", + "parts": [{ + "text": "Can you print \"Hello world!\"?" + }] + },{ + "role": "model", + "parts": [ + { + "text": "" + }, + { + "executable_code": { + "language": "PYTHON", + "code": "\nprint(\"hello world!\")\n" + } + }, + { + "code_execution_result": { + "outcome": "OUTCOME_OK", + "output": "hello world!\n" + } + }, + { + "text": "I have printed \"hello world!\" using the provided python code block. \n" + } + ], + },{ + "role": "user", + "parts": [{ + "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50." + }] + } + ] +}' +# [END code_execution_chat] From 36123289097d91fc5857303a85c6c428535411ba Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 7 Aug 2024 06:48:57 -0700 Subject: [PATCH 33/90] Fix response_schema sample. 
(#498) Change-Id: Id06f8556aef0a9b20204992b34c1d4bbc1437ef9 --- samples/controlled_generation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/controlled_generation.py b/samples/controlled_generation.py index e46b1a912..b0c269bb7 100644 --- a/samples/controlled_generation.py +++ b/samples/controlled_generation.py @@ -27,7 +27,7 @@ class Recipe(typing.TypedDict): result = model.generate_content( "List a few popular cookie recipes.", generation_config=genai.GenerationConfig( - response_mime_type="application/json", response_schema=list([Recipe]) + response_mime_type="application/json", response_schema=list[Recipe] ), ) print(result) From db311dd3f26233206a65b946a17417837ae12905 Mon Sep 17 00:00:00 2001 From: Mark McDonald Date: Wed, 14 Aug 2024 10:26:31 +0800 Subject: [PATCH 34/90] Add a README for /samples (#507) * Add a README for /samples * Add rest/README.md * Add an action to keep updated * Typo * Fix action workflow * Reworked action a bit * Extra info line --- .github/workflows/samples.yaml | 82 ++++++++++++++++++++++++++++++++++ samples/README.md | 26 +++++++++++ samples/rest/README.md | 25 +++++++++++ 3 files changed, 133 insertions(+) create mode 100644 .github/workflows/samples.yaml create mode 100644 samples/README.md create mode 100644 samples/rest/README.md diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml new file mode 100644 index 000000000..4c76a563b --- /dev/null +++ b/.github/workflows/samples.yaml @@ -0,0 +1,82 @@ +name: Validate samples + +on: + pull_request: + types: [opened, synchronize] # new, updates + +jobs: + update-python-list: + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Get Changed Files + id: changed_files + uses: tj-actions/changed-files@v44 + with: + files: | + samples/*.py + + - name: Check Python samples + env: + NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }} + README: samples/README.md + run: | + 
#!/bin/bash + + for file in ${NEW_FILES}; do + echo "Testing $file" + if [[ -f ${file} ]]; then + # File exists, so needs to be listed. + if ! grep -q $name ${README}; then + echo "Error: Sample not listed in README ($name)" + exit 1 + fi + else + # File does not exist, ensure it's not listed + if grep -q $name ${README}; then + echo "Error: Sample should not be listed in README ($name)" + exit 1 + fi + fi + done + + update-rest-list: + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v3 + + - name: Get Changed Files + id: changed_files + uses: tj-actions/changed-files@v44 + with: + files: | + samples/rest/*.sh + + - name: Check REST samples + env: + NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }} + README: samples/rest/README.md + run: | + #!/bin/bash + + for file in ${NEW_FILES}; do + echo "Testing $file" + if [[ -f ${file} ]]; then + # File exists, so needs to be listed. + if ! grep -q $name ${README}; then + echo "Error: Sample not listed in README ($name)" + exit 1 + fi + else + # File does not exist, ensure it's not listed + if grep -q $name ${README}; then + echo "Error: Sample should not be listed in README ($name)" + exit 1 + fi + fi + done diff --git a/samples/README.md b/samples/README.md new file mode 100644 index 000000000..cc56cda75 --- /dev/null +++ b/samples/README.md @@ -0,0 +1,26 @@ +# Gemini API Python SDK sample code + +This directory contains sample code for key features of the SDK, organised by high level feature. + +These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api). + +Each file is structured as a runnable test case, ensuring that samples are executable and functional. Each test demonstrates a single concept, and contains region tags that are used to demarcate the test scaffolding from the spotlight code. 
If you are contributing, code within region tags should follow sample code best practices - being clear, complete and concise. + +## Contents + +| File | Description | +| ---- | ----------- | +| [cache.py](./cache.py) | Context caching | +| [chat.py](./chat.py) | Multi-turn chat conversations | +| [code_execution.py](./code_execution.py) | Executing code | +| [configure_model_parameters.py](./configure_model_parameters.py) | Setting model parameters | +| [controlled_generation.py](./controlled_generation.py) | Generating content with output constraints (e.g. JSON mode) | +| [count_tokens.py](./count_tokens.py) | Counting input and output tokens | +| [embed.py](./embed.py) | Generating embeddings | +| [files.py](./files.py) | Managing files with the File API | +| [function_calling.py](./function_calling.py) | Using function calling | +| [models.py](./models.py) | Listing models and model metadata | +| [safety_settings.py](./safety_settings.py) | Setting and using safety controls | +| [system_instruction.py](./system_instruction.py) | Setting system instructions | +| [text_generation.py](./text_generation.py) | Generating text | +| [tuned_models.py](./tuned_models.py) | Creating and managing tuned models | diff --git a/samples/rest/README.md b/samples/rest/README.md new file mode 100644 index 000000000..bff8867cf --- /dev/null +++ b/samples/rest/README.md @@ -0,0 +1,25 @@ +# Gemini API REST sample code + +This directory contains sample code for key features of the API, organised by high level feature. + +These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api). + +Each file is structured as a runnable script, ensuring that samples are executable and functional. Each file contains region tags that are used to demarcate the script from the spotlight code. If you are contributing, code within region tags should follow sample code best practices - being clear, complete and concise. 
+ +## Contents + +| File | Description | +| ---- | ----------- | +| [cache.sh](./cache.sh) | Context caching | +| [chat.sh](./chat.sh) | Multi-turn chat conversations | +| [code_execution.sh](./code_execution.sh) | Executing code | +| [configure_model_parameters.sh](./configure_model_parameters.sh) | Setting model parameters | +| [controlled_generation.sh](./controlled_generation.sh) | Generating content with output constraints (e.g. JSON mode) | +| [count_tokens.sh](./count_tokens.sh) | Counting input and output tokens | +| [embed.sh](./embed.sh) | Generating embeddings | +| [files.sh](./files.sh) | Managing files with the File API | +| [function_calling.sh](./function_calling.sh) | Using function calling | +| [models.sh](./models.sh) | Listing models and model metadata | +| [safety_settings.sh](./safety_settings.sh) | Setting and using safety controls | +| [system_instruction.sh](./system_instruction.sh) | Setting system instructions | +| [text_generation.sh](./text_generation.sh) | Generating text | From a8edb4009bb52b98127b1d83e89f63724a7cd86b Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Fri, 16 Aug 2024 13:53:01 -0700 Subject: [PATCH 35/90] Add additional functions to files & count_tokens. 
(#490) * Add PDF file function * Add rest of count_tokens examples * Add context window count * Tested and fixed count_tokens.sh * Updated files.sh to simplify pdf * Update samples.yaml to get basename of file * Update samples.yaml to get basename of file * Update samples.yaml to get basename of file * Update samples.yaml to get basename of file * test samples.yaml --- .github/workflows/samples.yaml | 3 + samples/rest/count_tokens.sh | 125 +++++++++++++++++++++++++++++++-- samples/rest/files.sh | 49 +++++++++++++ 3 files changed, 172 insertions(+), 5 deletions(-) diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml index 4c76a563b..71c457cfa 100644 --- a/.github/workflows/samples.yaml +++ b/.github/workflows/samples.yaml @@ -68,12 +68,15 @@ jobs: echo "Testing $file" if [[ -f ${file} ]]; then # File exists, so needs to be listed. + echo $(basename $file) + name=$(basename $file) if ! grep -q $name ${README}; then echo "Error: Sample not listed in README ($name)" exit 1 fi else # File does not exist, ensure it's not listed + name=$(basename $file) if grep -q $name ${README}; then echo "Error: Sample should not be listed in README ($name)" exit 1 diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh index 5d4f08d14..e69fd6d1c 100644 --- a/samples/rest/count_tokens.sh +++ b/samples/rest/count_tokens.sh @@ -4,6 +4,7 @@ SCRIPT_DIR=$(dirname "$0") MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party) TEXT_PATH=${MEDIA_DIR}/poem.txt +A11_PATH=${MEDIA_DIR}/a11.txt IMG_PATH=${MEDIA_DIR}/organ.jpg AUDIO_PATH=${MEDIA_DIR}/sample.mp3 VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 @@ -16,6 +17,13 @@ else B64FLAGS="-w0" fi +echo "[START tokens_context_window]" +# [START tokens_context_window] +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro?key=$GOOGLE_API_KEY > model.json +jq .inputTokenLimit model.json +jq .outputTokenLimit model.json +# [END tokens_context_window] + echo "[START tokens_text_only]" # [START 
tokens_text_only] curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY \ @@ -97,7 +105,6 @@ curl "${upload_url}" \ --data-binary "@${IMG_PATH}" 2> /dev/null > file_info.json file_uri=$(jq ".file.uri" file_info.json) -echo file_uri=$file_uri curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ @@ -143,13 +150,10 @@ curl "${upload_url}" \ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json file_uri=$(jq ".file.uri" file_info.json) -echo file_uri=$file_uri state=$(jq ".file.state" file_info.json) -echo state=$state name=$(jq ".file.name" file_info.json) -echo name=$name while [[ "($state)" = *"PROCESSING"* ]]; do @@ -170,4 +174,115 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:c {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}] }] }' -# [END tokens_multimodal_video_audio_file_api] \ No newline at end of file +# [END tokens_multimodal_video_audio_file_api] + +echo "[START tokens_cached_content]" +# [START tokens_cached_content] +echo '{ + "model": "models/gemini-1.5-flash-001", + "contents":[ + { + "parts":[ + { + "inline_data": { + "mime_type":"text/plain", + "data": "'$(base64 $B64FLAGS $A11_PATH)'" + } + } + ], + "role": "user" + } + ], + "systemInstruction": { + "parts": [ + { + "text": "You are an expert at analyzing transcripts." 
+ } + ] + }, + "ttl": "300s" +}' > request.json + +curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -d @request.json \ + > cache.json + +jq .usageMetadata.totalTokenCount cache.json +# [END tokens_cached_content] + +echo "[START tokens_system_instruction]" +# [START tokens_system_instruction] +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +-H 'Content-Type: application/json' \ +-d '{ "system_instruction": { + "parts": + { "text": "You are a cat. Your name is Neko."}}, + "contents": { + "parts": { + "text": "Hello there"}}}' > system_instructions.json + +jq .usageMetadata.totalTokenCount system_instructions.json +# [END tokens_system_instruction] + +echo "[START tokens_tools]" +# [START tokens_tools] +cat > tools.json << EOF +{ + "function_declarations": [ + { + "name": "enable_lights", + "description": "Turn on the lighting system.", + "parameters": { "type": "object" } + }, + { + "name": "set_light_color", + "description": "Set the light color. Lights must be enabled for this to work.", + "parameters": { + "type": "object", + "properties": { + "rgb_hex": { + "type": "string", + "description": "The light color as a 6-digit hex string, e.g. ff0000 for red." + } + }, + "required": [ + "rgb_hex" + ] + } + }, + { + "name": "stop_lights", + "description": "Turn off the lighting system.", + "parameters": { "type": "object" } + } + ] +} +EOF + +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -d ' + { + "system_instruction": { + "parts": { + "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks." 
+ } + }, + "tools": ['$(cat tools.json)'], + + "tool_config": { + "function_calling_config": {"mode": "none"} + }, + + "contents": { + "role": "user", + "parts": { + "text": "What can you do?" + } + } + } +' > tools_output.json + +jq .usageMetadata.totalTokenCount tools_output.json +# [END tokens_tools] \ No newline at end of file diff --git a/samples/rest/files.sh b/samples/rest/files.sh index ae44b7467..8f292c4f6 100644 --- a/samples/rest/files.sh +++ b/samples/rest/files.sh @@ -8,6 +8,7 @@ IMG_PATH=${MEDIA_DIR}/organ.jpg IMG_PATH_2=${MEDIA_DIR}/Cajun_instruments.jpg AUDIO_PATH=${MEDIA_DIR}/sample.mp3 VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4 +PDF_PATH=${MEDIA_DIR}/test.pdf BASE_URL="https://generativelanguage.googleapis.com" @@ -243,6 +244,54 @@ echo jq ".candidates[].content.parts[].text" response.json # [END files_create_video] +echo "[START files_create_pdf]" +# [START files_create_pdf] +NUM_BYTES=$(wc -c < "${PDF_PATH}") +DISPLAY_NAME=TEXT +tmp_header_file=upload-header.tmp + +# Initial resumable request defining metadata. +# The upload url is in the response headers dump them to a file. +curl "${BASE_URL}/upload/v1beta/files?key=${GOOGLE_API_KEY}" \ + -D upload-header.tmp \ + -H "X-Goog-Upload-Protocol: resumable" \ + -H "X-Goog-Upload-Command: start" \ + -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Header-Content-Type: application/pdf" \ + -H "Content-Type: application/json" \ + -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null + +upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r") +rm "${tmp_header_file}" + +# Upload the actual bytes. 
+curl "${upload_url}" \ + -H "Content-Length: ${NUM_BYTES}" \ + -H "X-Goog-Upload-Offset: 0" \ + -H "X-Goog-Upload-Command: upload, finalize" \ + --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json + +file_uri=$(jq ".file.uri" file_info.json) +echo file_uri=$file_uri + +# Now generate content using that file +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ + -H 'Content-Type: application/json' \ + -X POST \ + -d '{ + "contents": [{ + "parts":[ + {"text": "Can you add a few more lines to this poem?"}, + {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}] + }] + }' 2> /dev/null > response.json + +cat response.json +echo + +jq ".candidates[].content.parts[].text" response.json +# [END files_create_pdf] + echo "[START files_list]" # [START files_list] echo "My files: " From 8a29017e9120f0552ee3ad6092e8545d1aa6f803 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Wed, 21 Aug 2024 16:23:23 -0700 Subject: [PATCH 36/90] REST for tuned models (#496) * REST for tuned models * Some fixes. 
Change-Id: I70d066ceacc7c07e27ac59359da87d9b9747353b * add progress reporting, add page_token Change-Id: I7881d6e5703d5bb027329aa22d0572132e024703 * Add delete tuned models * Update readme --------- Co-authored-by: Mark Daoust --- samples/rest/README.md | 1 + samples/rest/tuned_models.sh | 159 +++++++++++++++++++++++++++++++++++ 2 files changed, 160 insertions(+) create mode 100644 samples/rest/tuned_models.sh diff --git a/samples/rest/README.md b/samples/rest/README.md index bff8867cf..7969097d2 100644 --- a/samples/rest/README.md +++ b/samples/rest/README.md @@ -23,3 +23,4 @@ Each file is structured as a runnable script, ensuring that samples are executab | [safety_settings.sh](./safety_settings.sh) | Setting and using safety controls | | [system_instruction.sh](./system_instruction.sh) | Setting system instructions | | [text_generation.sh](./text_generation.sh) | Generating text | +| [tuned_models.sh](./tuned_models.sh) | Tuned models | diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh new file mode 100644 index 000000000..1e105377e --- /dev/null +++ b/samples/rest/tuned_models.sh @@ -0,0 +1,159 @@ +set -eu + +access_token=$(gcloud auth application-default print-access-token) + + +echo "[START tuned_models_create]" +# [START tuned_models_create] +curl -X POST https://generativelanguage.googleapis.com/v1beta/tunedModels \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" \ + -d ' + { + "display_name": "number generator model", + "base_model": "models/gemini-1.0-pro-001", + "tuning_task": { + "hyperparameters": { + "batch_size": 2, + "learning_rate": 0.001, + "epoch_count":5, + }, + "training_data": { + "examples": { + "examples": [ + { + "text_input": "1", + "output": "2", + },{ + "text_input": "3", + "output": "4", + },{ + "text_input": "-3", + "output": "-2", + },{ + "text_input": "twenty two", + "output": "twenty three", + },{ + "text_input": "two hundred", + 
"output": "two hundred one", + },{ + "text_input": "ninety nine", + "output": "one hundred", + },{ + "text_input": "8", + "output": "9", + },{ + "text_input": "-98", + "output": "-97", + },{ + "text_input": "1,000", + "output": "1,001", + },{ + "text_input": "10,100,000", + "output": "10,100,001", + },{ + "text_input": "thirteen", + "output": "fourteen", + },{ + "text_input": "eighty", + "output": "eighty one", + },{ + "text_input": "one", + "output": "two", + },{ + "text_input": "three", + "output": "four", + },{ + "text_input": "seven", + "output": "eight", + } + ] + } + } + } + }' | tee tunemodel.json + +# Check the operation for status updates during training. +# Note: you can only check the operation on v1/ +operation=$(cat tunemodel.json | jq ".name" | tr -d '"') +tuning_done=false + +while [[ "$tuning_done" != "true" ]]; +do + sleep 5 + curl -X GET https://generativelanguage.googleapis.com/v1/${operation} \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" 2> /dev/null > tuning_operation.json + + complete=$(jq .metadata.completedPercent < tuning_operation.json) + tput cuu1 + tput el + echo "Tuning...${complete}%" + tuning_done=$(jq .done < tuning_operation.json) +done + +# Or get the TunedModel and check it's state. The model is ready to use if the state is active. 
+modelname=$(cat tunemodel.json | jq ".metadata.tunedModel" | tr -d '"') +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname} \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" > tuned_model.json + +cat tuned_model.json | jq ".state" +# [END tuned_models_create] + + +echo "[START tuned_models_generate_content]" +# [START tuned_models_generate_content] +curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generateContent \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" \ + -d '{ + "contents": [{ + "parts": [{ + "text": "LXIII" + }] + }] + }' 2> /dev/null +# [END tuned_models_generate_content] + +echo "[START tuned_models_get]" +# [START tuned_models_get] +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname} \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" | grep state +# [END tuned_models_get] + +echo "[START tuned_models_list]" +# [START tuned_models_list] +# Sending a page_size is optional +curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" > tuned_models.json + +jq .tunedModels[].name < tuned_models.json + +# Send the nextPageToken to get the next page. 
+page_token=$(jq .nextPageToken < tuned_models.json | tr -d '"') + +if [[ "$page_token" != "null" ]]; then +curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5\&page_token=${page_token} \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" > tuned_models2.json +jq .tunedModels[].name < tuned_models2.json +fi +# [END tuned_models_list] + +echo "[START tuned_models_delete]" +# [START tuned_models_delete] +curl -X DELETE https://generativelanguage.googleapis.com/v1beta/${modelname} \ + -H 'Content-Type: application/json' \ + -H "Authorization: Bearer ${access_token}" \ + -H "x-goog-user-project: ${project_id}" +# [END tuned_models_delete] \ No newline at end of file From 8a96b862ec36f9a96a5d9f879915f006aeffecb6 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 22 Aug 2024 13:22:10 -0700 Subject: [PATCH 37/90] Use flash more often. (#517) * Use flash more often. Change-Id: If20e5d5e8462d160681d9dc2bfec965fd94fb633 * format Change-Id: I5a47b80da6f07b26a8079e33b2350ace3454bb50 * fix link Change-Id: If23c8b48e53238c99d71d68f3669e521a5a82c2f * fix check Change-Id: Iac2f1bd545395949cf013fc2fbc6c91716766d1d --- .github/workflows/samples.yaml | 1 + README.md | 2 +- samples/README.md | 30 ++++++++++++------------- samples/code_execution.py | 6 ++--- samples/count_tokens.py | 6 ++--- samples/rest/code_execution.sh | 4 ++-- samples/rest/controlled_generation.sh | 4 ++-- samples/rest/models.sh | 2 +- samples/rest/safety_settings.sh | 32 ++++++++++++--------------- samples/rest/system_instruction.sh | 2 +- samples/tuned_models.py | 4 ++-- 11 files changed, 45 insertions(+), 48 deletions(-) diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml index 71c457cfa..04f2611aa 100644 --- a/.github/workflows/samples.yaml +++ b/.github/workflows/samples.yaml @@ -28,6 +28,7 @@ jobs: for file in ${NEW_FILES}; do echo "Testing $file" + name=$(basename $file) 
if [[ -f ${file} ]]; then # File exists, so needs to be listed. if ! grep -q $name ${README}; then diff --git a/README.md b/README.md index 99d387bd7..19db267de 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,7 @@ genai.configure(api_key=os.environ["GEMINI_API_KEY"]) 3. Create a model and run a prompt. ```python -model = genai.GenerativeModel('gemini-1.0-pro-latest') +model = genai.GenerativeModel('gemini-1.5-flash') response = model.generate_content("The opposite of hot is") print(response.text) ``` diff --git a/samples/README.md b/samples/README.md index cc56cda75..ce2e9d243 100644 --- a/samples/README.md +++ b/samples/README.md @@ -8,19 +8,19 @@ Each file is structured as a runnable test case, ensuring that samples are execu ## Contents -| File | Description | -| ---- | ----------- | -| [cache.py](./cache.py) | Context caching | -| [chat.py](./chat.py) | Multi-turn chat conversations | -| [code_execution.py](./code_execution.py) | Executing code | +| File | Description | +|----------------------------------------------------------| ----------- | +| [cache.py](./cache.py) | Context caching | +| [chat.py](./chat.py) | Multi-turn chat conversations | +| [code_execution.py](./code_execution.py) | Executing code | | [configure_model_parameters.py](./configure_model_parameters.py) | Setting model parameters | -| [controlled_generation.py](./controlled_generation.py) | Generating content with output constraints (e.g. 
JSON mode) | -| [count_tokens.py](./count_tokens.py) | Counting input and output tokens | -| [embed.py](./embed.py) | Generating embeddings | -| [files.py](./files.py) | Managing files with the File API | -| [function_calling.py](./function_calling.py) | Using function calling | -| [models.py](./models.py) | Listing models and model metadata | -| [safety_settings.py](./safety_settings.py) | Setting and using safety controls | -| [system_instruction.py](./system_instruction.py) | Setting system instructions | -| [text_generation.py](./text_generation.py) | Generating text | -| [tuned_models.py](./tuned_models.py) | Creating and managing tuned models | +| [controlled_generation.py](./controlled_generation.py) | Generating content with output constraints (e.g. JSON mode) | +| [count_tokens.py](./count_tokens.py) | Counting input and output tokens | +| [embed.py](./embed.py) | Generating embeddings | +| [files.py](./files.py) | Managing files with the File API | +| [function_calling.py](./function_calling.py) | Using function calling | +| [models.py](./models.py) | Listing models and model metadata | +| [safety_settings.py](./safety_settings.py) | Setting and using safety controls | +| [system_instruction.py](./system_instruction.py) | Setting system instructions | +| [text_generation.py](./text_generation.py) | Generating text | +| [tuned_models.py](./tuned_models.py) | Creating and managing tuned models | diff --git a/samples/code_execution.py b/samples/code_execution.py index 019c6b344..9b5ad3638 100644 --- a/samples/code_execution.py +++ b/samples/code_execution.py @@ -29,7 +29,7 @@ def test_code_execution_basic(self): ) # Each `part` either contains `text`, `executable_code` or an `execution_result` - for part in result.candidates[0].content.parts: + for part in response.candidates[0].content.parts: print(part, "\n") print("-" * 80) @@ -92,7 +92,7 @@ def test_code_execution_basic(self): def test_code_execution_request_override(self): # [START 
code_execution_request_override] - model = genai.GenerativeModel(model_name="gemini-1.5-pro") + model = genai.GenerativeModel(model_name="gemini-1.5-flash") response = model.generate_content( ( "What is the sum of the first 50 prime numbers? " @@ -140,7 +140,7 @@ def test_code_execution_request_override(self): def test_code_execution_chat(self): # [START code_execution_chat] - model = genai.GenerativeModel(model_name="gemini-1.5-pro", tools="code_execution") + model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution") chat = model.start_chat() response = chat.send_message('Can you print "Hello world!"?') response = chat.send_message( diff --git a/samples/count_tokens.py b/samples/count_tokens.py index 74a9e4881..4d52684d9 100644 --- a/samples/count_tokens.py +++ b/samples/count_tokens.py @@ -23,7 +23,7 @@ class UnitTests(absltest.TestCase): def test_tokens_context_window(self): # [START tokens_context_window] - model_info = genai.get_model("models/gemini-1.0-pro-001") + model_info = genai.get_model("models/gemini-1.5-flash") # Returns the "context window" for the model, # which is the combined input and output token limits. @@ -91,7 +91,7 @@ def test_tokens_multimodal_image_inline(self): model = genai.GenerativeModel("models/gemini-1.5-flash") prompt = "Tell me about this image" - your_image_file = PIL.Image.open("image.jpg") + your_image_file = PIL.Image.open(media / "organ.jpg") # Call `count_tokens` to get the input token count # of the combined text and file (`total_tokens`). @@ -115,7 +115,7 @@ def test_tokens_multimodal_image_file_api(self): model = genai.GenerativeModel("models/gemini-1.5-flash") prompt = "Tell me about this image" - your_image_file = genai.upload_file(path="image.jpg") + your_image_file = genai.upload_file(path=media / "organ.jpg") # Call `count_tokens` to get the input token count # of the combined text and file (`total_tokens`). 
diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh index e7efe2e1f..73c297777 100644 --- a/samples/rest/code_execution.sh +++ b/samples/rest/code_execution.sh @@ -2,7 +2,7 @@ set -eu echo "[START code_execution_basic]" # [START code_execution_basic] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -d ' {"tools": [{'code_execution': {}}], "contents": { @@ -16,7 +16,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat echo "[START code_execution_chat]" # [START code_execution_chat] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -d '{"tools": [{'code_execution': {}}], "contents": [ diff --git a/samples/rest/controlled_generation.sh b/samples/rest/controlled_generation.sh index 69da2dac7..533870649 100644 --- a/samples/rest/controlled_generation.sh +++ b/samples/rest/controlled_generation.sh @@ -2,7 +2,7 @@ set -eu echo "json_controlled_generation" # [START json_controlled_generation] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "contents": [{ @@ -27,7 +27,7 @@ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-lat echo "json_no_schema" # [START json_no_schema] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl 
"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "contents": [{ diff --git a/samples/rest/models.sh b/samples/rest/models.sh index ebcd378ff..a03d5585b 100644 --- a/samples/rest/models.sh +++ b/samples/rest/models.sh @@ -7,5 +7,5 @@ curl https://generativelanguage.googleapis.com/v1beta/models?key=$GOOGLE_API_KEY echo "[START models_get]" # [START models_get] -curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro?key=$GOOGLE_API_KEY +curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash?key=$GOOGLE_API_KEY # [END models_get] diff --git a/samples/rest/safety_settings.sh b/samples/rest/safety_settings.sh index f7eb45186..713d25c06 100644 --- a/samples/rest/safety_settings.sh +++ b/samples/rest/safety_settings.sh @@ -2,37 +2,33 @@ set -eu echo "[START safety_settings]" # [START safety_settings] -echo '{ + echo '{ "safetySettings": [ - {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH} + {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"} ], "contents": [{ "parts":[{ "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! 
Write a ironic phrase about them.'"}]}]}' > request.json - curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \ + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ - -d @request.json 2> /dev/null > tee response.json - - jq .promptFeedback > response.json + -d @request.json 2> /dev/null # [END safety_settings] echo "[START safety_settings_multi]" # [START safety_settings_multi] -echo '{ - "safetySettings": [ - {'category': HARM_CATEGORY_HARASSMENT, 'threshold': BLOCK_ONLY_HIGH}, - {'category': HARM_CATEGORY_HATE_SPEECH, 'threshold': BLOCK_MEDIUM_AND_ABOVE} - ], - "contents": [{ - "parts":[{ - "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them.'"}]}]}' > request.json + echo '{ + "safetySettings": [ + {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}, + {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"} + ], + "contents": [{ + "parts":[{ + "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! 
Write a ironic phrase about them.'"}]}]}' > request.json - curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY" \ + curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -X POST \ - -d @request.json 2> /dev/null > response.json - - jq .promptFeedback > response.json + -d @request.json 2> /dev/null # [END safety_settings_multi] diff --git a/samples/rest/system_instruction.sh b/samples/rest/system_instruction.sh index 6a32c8f58..1e4c36d6c 100644 --- a/samples/rest/system_instruction.sh +++ b/samples/rest/system_instruction.sh @@ -2,7 +2,7 @@ set -eu echo "[START system_instruction]" # [START system_instruction] -curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \ +curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "system_instruction": { "parts": diff --git a/samples/tuned_models.py b/samples/tuned_models.py index d328d8c30..8ecad73de 100644 --- a/samples/tuned_models.py +++ b/samples/tuned_models.py @@ -27,7 +27,7 @@ def test_tuned_models_create(self): # [START tuned_models_create] import time - base_model = "models/gemini-1.0-pro-001" + base_model = "models/gemini-1.5-flash-001-tuning" training_data = [ {"text_input": "1", "output": "2"}, # ... more examples ... 
@@ -94,7 +94,7 @@ def test_tuned_models_list(self): def test_tuned_models_delete(self): import time - base_model = "models/gemini-1.0-pro-001" + base_model = "models/gemini-1.5-flash-001-tuning" training_data = samples / "increment_tuning_data.json" try: operation = genai.create_tuned_model( From 526fe03982d3dcd51a7cdf493d52bc9e81d9be5f Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Thu, 22 Aug 2024 13:31:27 -0700 Subject: [PATCH 38/90] Fix the missing closing backticks in CONTRIBUTING (#514) The rendering is broken due to the missing closing backticks. --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9415df2a8..0e4179149 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -93,10 +93,11 @@ pytest Or to debug, use: -```commandline +``` pip install nose2 nose2 --debugger +``` ### Type checking @@ -124,7 +125,6 @@ black . python docs/build_docs.py ``` - [setup]: https://cloud.google.com/nodejs/docs/setup [projects]: https://console.cloud.google.com/project [billing]: https://support.google.com/cloud/answer/6293499#enable-billing From 7342a62b9d53a1c188ba1695e9742d9a9b7efb1d Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Thu, 22 Aug 2024 13:46:06 -0700 Subject: [PATCH 39/90] Remove DiscussService and TextService (#512) * Remove DiscussService and TextService * Remove references to TextService in answer.py, remove refs in client.py * Remove reference from test case for client * Modify text_types.py for embeddings * Delete certain markdown files, update other mds * Delete generate emb md * Fix tests * Revert "Delete generate emb md" This reverts commit d4177f757eeb56723b3dc0444e3984d153a1c093. * Revert "Delete certain markdown files, update other mds" This reverts commit dca0b1fed45fb3ab9f246a83c645112f664925ec. 
* Reformat * Added other clients to test_client parameters * Update google/generativeai/client.py --------- Co-authored-by: Mark Daoust --- google/generativeai/__init__.py | 10 - google/generativeai/answer.py | 4 +- google/generativeai/client.py | 18 +- google/generativeai/discuss.py | 599 --------------------- google/generativeai/embedding.py | 2 +- google/generativeai/text.py | 347 ------------ google/generativeai/types/__init__.py | 4 - google/generativeai/types/discuss_types.py | 208 ------- google/generativeai/types/text_types.py | 43 -- tests/test_client.py | 32 +- tests/test_discuss.py | 386 ------------- tests/test_discuss_async.py | 85 --- tests/test_text.py | 542 ------------------- 13 files changed, 23 insertions(+), 2257 deletions(-) delete mode 100644 google/generativeai/discuss.py delete mode 100644 google/generativeai/text.py delete mode 100644 google/generativeai/types/discuss_types.py delete mode 100644 tests/test_discuss.py delete mode 100644 tests/test_discuss_async.py delete mode 100644 tests/test_text.py diff --git a/google/generativeai/__init__.py b/google/generativeai/__init__.py index 19341b625..5b143d768 100644 --- a/google/generativeai/__init__.py +++ b/google/generativeai/__init__.py @@ -48,10 +48,6 @@ from google.generativeai.client import configure -from google.generativeai.discuss import chat -from google.generativeai.discuss import chat_async -from google.generativeai.discuss import count_message_tokens - from google.generativeai.embedding import embed_content from google.generativeai.embedding import embed_content_async @@ -77,19 +73,13 @@ from google.generativeai.operations import list_operations from google.generativeai.operations import get_operation -from google.generativeai.text import generate_text -from google.generativeai.text import generate_embeddings -from google.generativeai.text import count_text_tokens - from google.generativeai.types import GenerationConfig __version__ = version.__version__ -del discuss del embedding 
del files del generative_models -del text del models del client del operations diff --git a/google/generativeai/answer.py b/google/generativeai/answer.py index 4dd93feaf..83bf5f679 100644 --- a/google/generativeai/answer.py +++ b/google/generativeai/answer.py @@ -283,7 +283,7 @@ def generate_answer( answer_style: Style in which the grounded answer should be returned. safety_settings: Safety settings for generated output. Defaults to None. temperature: Controls the randomness of the output. - client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. + client: If you're not relying on a default client, you pass a `glm.GenerativeServiceClient` instead. request_options: Options for the request. Returns: @@ -337,7 +337,7 @@ async def generate_answer_async( answer_style: Style in which the grounded answer should be returned. safety_settings: Safety settings for generated output. Defaults to None. temperature: Controls the randomness of the output. - client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. + client: If you're not relying on a default client, you pass a `glm.GenerativeServiceClient` instead. Returns: A `types.Answer` containing the model's text answer response. 
diff --git a/google/generativeai/client.py b/google/generativeai/client.py index 7e2193890..01d0a003b 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -108,9 +108,6 @@ async def create_file(self, *args, **kwargs): class _ClientManager: client_config: dict[str, Any] = dataclasses.field(default_factory=dict) default_metadata: Sequence[tuple[str, str]] = () - - discuss_client: glm.DiscussServiceClient | None = None - discuss_async_client: glm.DiscussServiceAsyncClient | None = None clients: dict[str, Any] = dataclasses.field(default_factory=dict) def configure( @@ -119,7 +116,7 @@ def configure( api_key: str | None = None, credentials: ga_credentials.Credentials | dict | None = None, # The user can pass a string to choose `rest` or `grpc` or 'grpc_asyncio'. - # See `_transport_registry` in `DiscussServiceClientMeta`. + # See _transport_registry in the google.ai.generativelanguage package. # Since the transport classes align with the client classes it wouldn't make # sense to accept a `Transport` object here even though the client classes can. # We could accept a dict since all the `Transport` classes take the same args, @@ -281,7 +278,6 @@ def configure( api_key: str | None = None, credentials: ga_credentials.Credentials | dict | None = None, # The user can pass a string to choose `rest` or `grpc` or 'grpc_asyncio'. - # See `_transport_registry` in `DiscussServiceClientMeta`. # Since the transport classes align with the client classes it wouldn't make # sense to accept a `Transport` object here even though the client classes can. 
# We could accept a dict since all the `Transport` classes take the same args, @@ -326,14 +322,6 @@ def get_default_cache_client() -> glm.CacheServiceClient: return _client_manager.get_default_client("cache") -def get_default_discuss_client() -> glm.DiscussServiceClient: - return _client_manager.get_default_client("discuss") - - -def get_default_discuss_async_client() -> glm.DiscussServiceAsyncClient: - return _client_manager.get_default_client("discuss_async") - - def get_default_file_client() -> glm.FilesServiceClient: return _client_manager.get_default_client("file") @@ -350,10 +338,6 @@ def get_default_generative_async_client() -> glm.GenerativeServiceAsyncClient: return _client_manager.get_default_client("generative_async") -def get_default_text_client() -> glm.TextServiceClient: - return _client_manager.get_default_client("text") - - def get_default_operations_client() -> operations_v1.OperationsClient: return _client_manager.get_default_client("operations") diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py deleted file mode 100644 index 448347b41..000000000 --- a/google/generativeai/discuss.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from __future__ import annotations - -import dataclasses -import sys -import textwrap - -from typing import Iterable, List - -import google.ai.generativelanguage as glm - -from google.generativeai.client import get_default_discuss_client -from google.generativeai.client import get_default_discuss_async_client -from google.generativeai import string_utils -from google.generativeai import protos -from google.generativeai.types import discuss_types -from google.generativeai.types import helper_types -from google.generativeai.types import model_types -from google.generativeai.types import palm_safety_types - - -def _make_message(content: discuss_types.MessageOptions) -> protos.Message: - """Creates a `protos.Message` object from the provided content.""" - if isinstance(content, protos.Message): - return content - if isinstance(content, str): - return protos.Message(content=content) - else: - return protos.Message(content) - - -def _make_messages( - messages: discuss_types.MessagesOptions, -) -> List[protos.Message]: - """ - Creates a list of `protos.Message` objects from the provided messages. - - This function takes a variety of message content inputs, such as strings, dictionaries, - or `protos.Message` objects, and creates a list of `protos.Message` objects. It ensures that - the authors of the messages alternate appropriately. If authors are not provided, - default authors are assigned based on their position in the list. - - Args: - messages: The messages to convert. - - Returns: - A list of `protos.Message` objects with alternating authors. 
- """ - if isinstance(messages, (str, dict, protos.Message)): - messages = [_make_message(messages)] - else: - messages = [_make_message(message) for message in messages] - - even_authors = set(msg.author for msg in messages[::2] if msg.author) - if not even_authors: - even_author = "0" - elif len(even_authors) == 1: - even_author = even_authors.pop() - else: - raise discuss_types.AuthorError( - "Invalid sequence: Authors in the discussion must alternate strictly." - ) - - odd_authors = set(msg.author for msg in messages[1::2] if msg.author) - if not odd_authors: - odd_author = "1" - elif len(odd_authors) == 1: - odd_author = odd_authors.pop() - else: - raise discuss_types.AuthorError( - "Invalid sequence: Authors in the discussion must alternate strictly." - ) - - if all(msg.author for msg in messages): - return messages - - authors = [even_author, odd_author] - for i, msg in enumerate(messages): - msg.author = authors[i % 2] - - return messages - - -def _make_example(item: discuss_types.ExampleOptions) -> protos.Example: - """Creates a `protos.Example` object from the provided item.""" - if isinstance(item, protos.Example): - return item - - if isinstance(item, dict): - item = item.copy() - item["input"] = _make_message(item["input"]) - item["output"] = _make_message(item["output"]) - return protos.Example(item) - - if isinstance(item, Iterable): - input, output = list(item) - return protos.Example(input=_make_message(input), output=_make_message(output)) - - # try anyway - return protos.Example(item) - - -def _make_examples_from_flat( - examples: List[discuss_types.MessageOptions], -) -> List[protos.Example]: - """ - Creates a list of `protos.Example` objects from a list of message options. - - This function takes a list of `discuss_types.MessageOptions` and pairs them into - `protos.Example` objects. The input examples must be in pairs to create valid examples. - - Args: - examples: The list of `discuss_types.MessageOptions`. 
- - Returns: - A list of `protos.Example objects` created by pairing up the provided messages. - - Raises: - ValueError: If the provided list of examples is not of even length. - """ - if len(examples) % 2 != 0: - raise ValueError( - textwrap.dedent( - f"""\ - Invalid input: You must pass either `Primer` objects, pairs of messages, or an even number of messages. - Currently, {len(examples)} messages were provided, which is an odd number.""" - ) - ) - result = [] - pair = [] - for n, item in enumerate(examples): - msg = _make_message(item) - pair.append(msg) - if n % 2 == 0: - continue - primer = protos.Example( - input=pair[0], - output=pair[1], - ) - result.append(primer) - pair = [] - return result - - -def _make_examples( - examples: discuss_types.ExamplesOptions, -) -> List[protos.Example]: - """ - Creates a list of `protos.Example` objects from the provided examples. - - This function takes various types of example content inputs and creates a list - of `protos.Example` objects. It handles the conversion of different input types and ensures - the appropriate structure for creating valid examples. - - Args: - examples: The examples to convert. - - Returns: - A list of `protos.Example` objects created from the provided examples. - """ - if isinstance(examples, protos.Example): - return [examples] - - if isinstance(examples, dict): - return [_make_example(examples)] - - examples = list(examples) - - if not examples: - return examples - - first = examples[0] - - if isinstance(first, dict): - if "content" in first: - # These are `Messages` - return _make_examples_from_flat(examples) - else: - if not ("input" in first and "output" in first): - raise TypeError( - "Invalid dictionary format: To create an `Example` instance, the dictionary must contain both `input` and `output` keys." 
- ) - else: - if isinstance(first, discuss_types.MESSAGE_OPTIONS): - return _make_examples_from_flat(examples) - - result = [] - for item in examples: - result.append(_make_example(item)) - return result - - -def _make_message_prompt_dict( - prompt: discuss_types.MessagePromptOptions = None, - *, - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, -) -> protos.MessagePrompt: - """ - Creates a `protos.MessagePrompt` object from the provided prompt components. - - This function constructs a `protos.MessagePrompt` object using the provided `context`, `examples`, - or `messages`. It ensures the proper structure and handling of the input components. - - Either pass a `prompt` or it's component `context`, `examples`, `messages`. - - Args: - prompt: The complete prompt components. - context: The context for the prompt. - examples: The examples for the prompt. - messages: The messages for the prompt. - - Returns: - A `protos.MessagePrompt` object created from the provided prompt components. - """ - if prompt is None: - prompt = dict( - context=context, - examples=examples, - messages=messages, - ) - else: - flat_prompt = (context is not None) or (examples is not None) or (messages is not None) - if flat_prompt: - raise ValueError( - "Invalid configuration: Either `prompt` or its fields `(context, examples, messages)` should be set, but not both simultaneously." - ) - if isinstance(prompt, protos.MessagePrompt): - return prompt - elif isinstance(prompt, dict): # Always check dict before Iterable. - pass - else: - prompt = {"messages": prompt} - - keys = set(prompt.keys()) - if not keys.issubset(discuss_types.MESSAGE_PROMPT_KEYS): - raise KeyError( - f"Invalid prompt dictionary: Extra entries found that are not recognized: {keys - discuss_types.MESSAGE_PROMPT_KEYS}. Please check the keys." 
- ) - - examples = prompt.get("examples", None) - if examples is not None: - prompt["examples"] = _make_examples(examples) - messages = prompt.get("messages", None) - if messages is not None: - prompt["messages"] = _make_messages(messages) - - prompt = {k: v for k, v in prompt.items() if v is not None} - return prompt - - -def _make_message_prompt( - prompt: discuss_types.MessagePromptOptions = None, - *, - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, -) -> protos.MessagePrompt: - """Creates a `protos.MessagePrompt` object from the provided prompt components.""" - prompt = _make_message_prompt_dict( - prompt=prompt, context=context, examples=examples, messages=messages - ) - return protos.MessagePrompt(prompt) - - -def _make_generate_message_request( - *, - model: model_types.AnyModelNameOptions | None, - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, - temperature: float | None = None, - candidate_count: int | None = None, - top_p: float | None = None, - top_k: float | None = None, - prompt: discuss_types.MessagePromptOptions | None = None, -) -> protos.GenerateMessageRequest: - """Creates a `protos.GenerateMessageRequest` object for generating messages.""" - model = model_types.make_model_name(model) - - prompt = _make_message_prompt( - prompt=prompt, context=context, examples=examples, messages=messages - ) - - return protos.GenerateMessageRequest( - model=model, - prompt=prompt, - temperature=temperature, - top_p=top_p, - top_k=top_k, - candidate_count=candidate_count, - ) - - -DEFAULT_DISCUSS_MODEL = "models/chat-bison-001" - - -def chat( - *, - model: model_types.AnyModelNameOptions | None = "models/chat-bison-001", - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, - 
temperature: float | None = None, - candidate_count: int | None = None, - top_p: float | None = None, - top_k: float | None = None, - prompt: discuss_types.MessagePromptOptions | None = None, - client: glm.DiscussServiceClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> discuss_types.ChatResponse: - """Calls the API to initiate a chat with a model using provided parameters - - Args: - model: Which model to call, as a string or a `types.Model`. - context: Text that should be provided to the model first, to ground the response. - - If not empty, this `context` will be given to the model first before the - `examples` and `messages`. - - This field can be a description of your prompt to the model to help provide - context and guide the responses. - - Examples: - - * "Translate the phrase from English to French." - * "Given a statement, classify the sentiment as happy, sad or neutral." - - Anything included in this field will take precedence over history in `messages` - if the total input size exceeds the model's `Model.input_token_limit`. - examples: Examples of what the model should generate. - - This includes both the user input and the response that the model should - emulate. - - These `examples` are treated identically to conversation messages except - that they take precedence over the history in `messages`: - If the total input size exceeds the model's `input_token_limit` the input - will be truncated. Items will be dropped from `messages` before `examples` - messages: A snapshot of the conversation history sorted chronologically. - - Turns alternate between two authors. - - If the total input size exceeds the model's `input_token_limit` the input - will be truncated: The oldest items will be dropped from `messages`. - temperature: Controls the randomness of the output. Must be positive. - - Typical values are in the range: `[0.0,1.0]`. Higher values produce a - more random and varied response. 
A temperature of zero will be deterministic. - candidate_count: The **maximum** number of generated response messages to return. - - This value must be between `[1, 8]`, inclusive. If unset, this - will default to `1`. - - Note: Only unique candidates are returned. Higher temperatures are more - likely to produce unique candidates. Setting `temperature=0.0` will always - return 1 candidate regardless of the `candidate_count`. - top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and - top-k sampling. - - `top_k` sets the maximum number of tokens to sample from on each step. - top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and - top-k sampling. - - `top_p` configures the nucleus sampling. It sets the maximum cumulative - probability of tokens to sample from. - - For example, if the sorted probabilities are - `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample - as `[0.625, 0.25, 0.125, 0, 0, 0]`. - - Typical values are in the `[0.9, 1.0]` range. - prompt: You may pass a `types.MessagePromptOptions` **instead** of a - setting `context`/`examples`/`messages`, but not both. - client: If you're not relying on the default client, you pass a - `glm.DiscussServiceClient` instead. - request_options: Options for the request. - - Returns: - A `types.ChatResponse` containing the model's reply. 
- """ - request = _make_generate_message_request( - model=model, - context=context, - examples=examples, - messages=messages, - temperature=temperature, - candidate_count=candidate_count, - top_p=top_p, - top_k=top_k, - prompt=prompt, - ) - - return _generate_response(client=client, request=request, request_options=request_options) - - -@string_utils.set_doc(chat.__doc__) -async def chat_async( - *, - model: model_types.AnyModelNameOptions | None = "models/chat-bison-001", - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, - temperature: float | None = None, - candidate_count: int | None = None, - top_p: float | None = None, - top_k: float | None = None, - prompt: discuss_types.MessagePromptOptions | None = None, - client: glm.DiscussServiceAsyncClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> discuss_types.ChatResponse: - """Calls the API asynchronously to initiate a chat with a model using provided parameters""" - request = _make_generate_message_request( - model=model, - context=context, - examples=examples, - messages=messages, - temperature=temperature, - candidate_count=candidate_count, - top_p=top_p, - top_k=top_k, - prompt=prompt, - ) - - return await _generate_response_async( - client=client, request=request, request_options=request_options - ) - - -if (sys.version_info.major, sys.version_info.minor) >= (3, 10): - DATACLASS_KWARGS = {"kw_only": True} -else: - DATACLASS_KWARGS = {} - - -@string_utils.prettyprint -@string_utils.set_doc(discuss_types.ChatResponse.__doc__) -@dataclasses.dataclass(**DATACLASS_KWARGS, init=False) -class ChatResponse(discuss_types.ChatResponse): - _client: glm.DiscussServiceClient | None = dataclasses.field(default=lambda: None, repr=False) - - def __init__(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - @property - 
@string_utils.set_doc(discuss_types.ChatResponse.last.__doc__) - def last(self) -> str | None: - if self.messages[-1]: - return self.messages[-1]["content"] - else: - return None - - @last.setter - def last(self, message: discuss_types.MessageOptions): - message = _make_message(message) - message = type(message).to_dict(message) - self.messages[-1] = message - - @string_utils.set_doc(discuss_types.ChatResponse.reply.__doc__) - def reply( - self, - message: discuss_types.MessageOptions, - request_options: helper_types.RequestOptionsType | None = None, - ) -> discuss_types.ChatResponse: - if isinstance(self._client, glm.DiscussServiceAsyncClient): - raise TypeError( - "Invalid operation: The 'reply' method cannot be called on an asynchronous client. Please use the 'reply_async' method instead." - ) - if self.last is None: - raise ValueError( - f"Invalid operation: No candidates returned from the model's last response. " - f"Please inspect the '.filters' attribute to understand why responses were filtered out. Current filters: {self.filters}" - ) - - request = self.to_dict() - request.pop("candidates") - request.pop("filters", None) - request["messages"] = list(request["messages"]) - request["messages"].append(_make_message(message)) - request = _make_generate_message_request(**request) - return _generate_response( - request=request, client=self._client, request_options=request_options - ) - - @string_utils.set_doc(discuss_types.ChatResponse.reply.__doc__) - async def reply_async( - self, message: discuss_types.MessageOptions - ) -> discuss_types.ChatResponse: - if isinstance(self._client, glm.DiscussServiceClient): - raise TypeError( - "Invalid method call: `reply_async` is not supported on a non-async client. Please use the `reply` method instead." 
- ) - request = self.to_dict() - request.pop("candidates") - request.pop("filters", None) - request["messages"] = list(request["messages"]) - request["messages"].append(_make_message(message)) - request = _make_generate_message_request(**request) - return await _generate_response_async(request=request, client=self._client) - - -def _build_chat_response( - request: protos.GenerateMessageRequest, - response: protos.GenerateMessageResponse, - client: glm.DiscussServiceClient | protos.DiscussServiceAsyncClient, -) -> ChatResponse: - request = type(request).to_dict(request) - prompt = request.pop("prompt") - request["examples"] = prompt["examples"] - request["context"] = prompt["context"] - request["messages"] = prompt["messages"] - - response = type(response).to_dict(response) - response.pop("messages") - - response["filters"] = palm_safety_types.convert_filters_to_enums(response["filters"]) - - if response["candidates"]: - last = response["candidates"][0] - else: - last = None - request["messages"].append(last) - request.setdefault("temperature", None) - request.setdefault("candidate_count", None) - - return ChatResponse(_client=client, **response, **request) # pytype: disable=missing-parameter - - -def _generate_response( - request: protos.GenerateMessageRequest, - client: glm.DiscussServiceClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> ChatResponse: - if request_options is None: - request_options = {} - - if client is None: - client = get_default_discuss_client() - - response = client.generate_message(request, **request_options) - - return _build_chat_response(request, response, client) - - -async def _generate_response_async( - request: protos.GenerateMessageRequest, - client: glm.DiscussServiceAsyncClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> ChatResponse: - if request_options is None: - request_options = {} - - if client is None: - client = 
get_default_discuss_async_client() - - response = await client.generate_message(request, **request_options) - - return _build_chat_response(request, response, client) - - -def count_message_tokens( - *, - prompt: discuss_types.MessagePromptOptions = None, - context: str | None = None, - examples: discuss_types.ExamplesOptions | None = None, - messages: discuss_types.MessagesOptions | None = None, - model: model_types.AnyModelNameOptions = DEFAULT_DISCUSS_MODEL, - client: glm.DiscussServiceAsyncClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> discuss_types.TokenCount: - """Calls the API to calculate the number of tokens used in the prompt.""" - - model = model_types.make_model_name(model) - prompt = _make_message_prompt(prompt, context=context, examples=examples, messages=messages) - - if request_options is None: - request_options = {} - - if client is None: - client = get_default_discuss_client() - - result = client.count_message_tokens(model=model, prompt=prompt, **request_options) - - return type(result).to_dict(result) diff --git a/google/generativeai/embedding.py b/google/generativeai/embedding.py index 616fa07bf..15645c792 100644 --- a/google/generativeai/embedding.py +++ b/google/generativeai/embedding.py @@ -24,8 +24,8 @@ from google.generativeai.client import get_default_generative_async_client from google.generativeai.types import helper_types -from google.generativeai.types import text_types from google.generativeai.types import model_types +from google.generativeai.types import text_types from google.generativeai.types import content_types DEFAULT_EMB_MODEL = "models/embedding-001" diff --git a/google/generativeai/text.py b/google/generativeai/text.py deleted file mode 100644 index 2a6267661..000000000 --- a/google/generativeai/text.py +++ /dev/null @@ -1,347 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from __future__ import annotations - -import dataclasses -from collections.abc import Iterable, Sequence -import itertools -from typing import Any, Iterable, overload, TypeVar - -import google.ai.generativelanguage as glm - -from google.generativeai import protos - -from google.generativeai.client import get_default_text_client -from google.generativeai import string_utils -from google.generativeai.types import helper_types -from google.generativeai.types import text_types -from google.generativeai.types import model_types -from google.generativeai import models -from google.generativeai.types import palm_safety_types - -DEFAULT_TEXT_MODEL = "models/text-bison-001" -EMBEDDING_MAX_BATCH_SIZE = 100 - -try: - # python 3.12+ - _batched = itertools.batched # type: ignore -except AttributeError: - T = TypeVar("T") - - def _batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]: - if n < 1: - raise ValueError(f"Batch size `n` must be >1, got: {n}") - batch = [] - for item in iterable: - batch.append(item) - if len(batch) == n: - yield batch - batch = [] - - if batch: - yield batch - - -def _make_text_prompt(prompt: str | dict[str, str]) -> protos.TextPrompt: - """ - Creates a `protos.TextPrompt` object based on the provided prompt input. - - Args: - prompt: The prompt input, either a string or a dictionary. - - Returns: - protos.TextPrompt: A TextPrompt object containing the prompt text. - - Raises: - TypeError: If the provided prompt is neither a string nor a dictionary. 
- """ - if isinstance(prompt, str): - return protos.TextPrompt(text=prompt) - elif isinstance(prompt, dict): - return protos.TextPrompt(prompt) - else: - raise TypeError( - "Invalid argument type: Expected a string or dictionary for the text prompt." - ) - - -def _make_generate_text_request( - *, - model: model_types.AnyModelNameOptions = DEFAULT_TEXT_MODEL, - prompt: str | None = None, - temperature: float | None = None, - candidate_count: int | None = None, - max_output_tokens: int | None = None, - top_p: int | None = None, - top_k: int | None = None, - safety_settings: palm_safety_types.SafetySettingOptions | None = None, - stop_sequences: str | Iterable[str] | None = None, -) -> protos.GenerateTextRequest: - """ - Creates a `protos.GenerateTextRequest` object based on the provided parameters. - - This function generates a `protos.GenerateTextRequest` object with the specified - parameters. It prepares the input parameters and creates a request that can be - used for generating text using the chosen model. - - Args: - model: The model to use for text generation. - prompt: The prompt for text generation. Defaults to None. - temperature: The temperature for randomness in generation. Defaults to None. - candidate_count: The number of candidates to consider. Defaults to None. - max_output_tokens: The maximum number of output tokens. Defaults to None. - top_p: The nucleus sampling probability threshold. Defaults to None. - top_k: The top-k sampling parameter. Defaults to None. - safety_settings: Safety settings for generated text. Defaults to None. - stop_sequences: Stop sequences to halt text generation. Can be a string - or iterable of strings. Defaults to None. - - Returns: - `protos.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters. 
- """ - model = model_types.make_model_name(model) - prompt = _make_text_prompt(prompt=prompt) - safety_settings = palm_safety_types.normalize_safety_settings(safety_settings) - if isinstance(stop_sequences, str): - stop_sequences = [stop_sequences] - if stop_sequences: - stop_sequences = list(stop_sequences) - - return protos.GenerateTextRequest( - model=model, - prompt=prompt, - temperature=temperature, - candidate_count=candidate_count, - max_output_tokens=max_output_tokens, - top_p=top_p, - top_k=top_k, - safety_settings=safety_settings, - stop_sequences=stop_sequences, - ) - - -def generate_text( - *, - model: model_types.AnyModelNameOptions = DEFAULT_TEXT_MODEL, - prompt: str, - temperature: float | None = None, - candidate_count: int | None = None, - max_output_tokens: int | None = None, - top_p: float | None = None, - top_k: float | None = None, - safety_settings: palm_safety_types.SafetySettingOptions | None = None, - stop_sequences: str | Iterable[str] | None = None, - client: glm.TextServiceClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> text_types.Completion: - """Calls the API to generate text based on the provided prompt. - - Args: - model: Which model to call, as a string or a `types.Model`. - prompt: Free-form input text given to the model. Given a prompt, the model will - generate text that completes the input text. - temperature: Controls the randomness of the output. Must be positive. - Typical values are in the range: `[0.0,1.0]`. Higher values produce a - more random and varied response. A temperature of zero will be deterministic. - candidate_count: The **maximum** number of generated response messages to return. - This value must be between `[1, 8]`, inclusive. If unset, this - will default to `1`. - - Note: Only unique candidates are returned. Higher temperatures are more - likely to produce unique candidates. 
Setting `temperature=0.0` will always - return 1 candidate regardless of the `candidate_count`. - max_output_tokens: Maximum number of tokens to include in a candidate. Must be greater - than zero. If unset, will default to 64. - top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling. - `top_k` sets the maximum number of tokens to sample from on each step. - top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling. - `top_p` configures the nucleus sampling. It sets the maximum cumulative - probability of tokens to sample from. - For example, if the sorted probabilities are - `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample - as `[0.625, 0.25, 0.125, 0, 0, 0]`. - safety_settings: A list of unique `types.SafetySetting` instances for blocking unsafe content. - These will be enforced on the `prompt` and - `candidates`. There should not be more than one - setting for each `types.SafetyCategory` type. The API will block any prompts and - responses that fail to meet the thresholds set by these settings. This list - overrides the default settings for each `SafetyCategory` specified in the - safety_settings. If there is no `types.SafetySetting` for a given - `SafetyCategory` provided in the list, the API will use the default safety - setting for that category. - stop_sequences: A set of up to 5 character sequences that will stop output generation. - If specified, the API will stop at the first appearance of a stop - sequence. The stop sequence will not be included as part of the response. - client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. - request_options: Options for the request. - - Returns: - A `types.Completion` containing the model's text completion response. 
- """ - request = _make_generate_text_request( - model=model, - prompt=prompt, - temperature=temperature, - candidate_count=candidate_count, - max_output_tokens=max_output_tokens, - top_p=top_p, - top_k=top_k, - safety_settings=safety_settings, - stop_sequences=stop_sequences, - ) - - return _generate_response(client=client, request=request, request_options=request_options) - - -@string_utils.prettyprint -@dataclasses.dataclass(init=False) -class Completion(text_types.Completion): - def __init__(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - self.result = None - if self.candidates: - self.result = self.candidates[0]["output"] - - -def _generate_response( - request: protos.GenerateTextRequest, - client: glm.TextServiceClient = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> Completion: - """ - Generates a response using the provided `protos.GenerateTextRequest` and client. - - Args: - request: The text generation request. - client: The client to use for text generation. Defaults to None, in which - case the default text client is used. - request_options: Options for the request. - - Returns: - `Completion`: A `Completion` object with the generated text and response information. 
- """ - if request_options is None: - request_options = {} - - if client is None: - client = get_default_text_client() - - response = client.generate_text(request, **request_options) - response = type(response).to_dict(response) - - response["filters"] = palm_safety_types.convert_filters_to_enums(response["filters"]) - response["safety_feedback"] = palm_safety_types.convert_safety_feedback_to_enums( - response["safety_feedback"] - ) - response["candidates"] = palm_safety_types.convert_candidate_enums(response["candidates"]) - - return Completion(_client=client, **response) - - -def count_text_tokens( - model: model_types.AnyModelNameOptions, - prompt: str, - client: glm.TextServiceClient | None = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> text_types.TokenCount: - """Calls the API to count the number of tokens in the text prompt.""" - - base_model = models.get_base_model_name(model) - - if request_options is None: - request_options = {} - - if client is None: - client = get_default_text_client() - - result = client.count_text_tokens( - protos.CountTextTokensRequest(model=base_model, prompt={"text": prompt}), - **request_options, - ) - - return type(result).to_dict(result) - - -@overload -def generate_embeddings( - model: model_types.BaseModelNameOptions, - text: str, - client: glm.TextServiceClient = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> text_types.EmbeddingDict: ... - - -@overload -def generate_embeddings( - model: model_types.BaseModelNameOptions, - text: Sequence[str], - client: glm.TextServiceClient = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> text_types.BatchEmbeddingDict: ... 
- - -def generate_embeddings( - model: model_types.BaseModelNameOptions, - text: str | Sequence[str], - client: glm.TextServiceClient = None, - request_options: helper_types.RequestOptionsType | None = None, -) -> text_types.EmbeddingDict | text_types.BatchEmbeddingDict: - """Calls the API to create an embedding for the text passed in. - - Args: - model: Which model to call, as a string or a `types.Model`. - - text: Free-form input text given to the model. Given a string, the model will - generate an embedding based on the input text. - - client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead. - - request_options: Options for the request. - - Returns: - Dictionary containing the embedding (list of float values) for the input text. - """ - model = model_types.make_model_name(model) - - if request_options is None: - request_options = {} - - if client is None: - client = get_default_text_client() - - if isinstance(text, str): - embedding_request = protos.EmbedTextRequest(model=model, text=text) - embedding_response = client.embed_text( - embedding_request, - **request_options, - ) - embedding_dict = type(embedding_response).to_dict(embedding_response) - embedding_dict["embedding"] = embedding_dict["embedding"]["value"] - else: - result = {"embedding": []} - for batch in _batched(text, EMBEDDING_MAX_BATCH_SIZE): - # TODO(markdaoust): This could use an option for returning an iterator or wait-bar. 
- embedding_request = protos.BatchEmbedTextRequest(model=model, texts=batch) - embedding_response = client.batch_embed_text( - embedding_request, - **request_options, - ) - embedding_dict = type(embedding_response).to_dict(embedding_response) - result["embedding"].extend(e["value"] for e in embedding_dict["embeddings"]) - return result - - return embedding_dict diff --git a/google/generativeai/types/__init__.py b/google/generativeai/types/__init__.py index 0acfb1397..1e7853746 100644 --- a/google/generativeai/types/__init__.py +++ b/google/generativeai/types/__init__.py @@ -16,18 +16,14 @@ from google.generativeai.types.citation_types import * from google.generativeai.types.content_types import * -from google.generativeai.types.discuss_types import * from google.generativeai.types.file_types import * from google.generativeai.types.generation_types import * from google.generativeai.types.helper_types import * from google.generativeai.types.model_types import * from google.generativeai.types.permission_types import * from google.generativeai.types.safety_types import * -from google.generativeai.types.text_types import * -del discuss_types del model_types -del text_types del citation_types del safety_types diff --git a/google/generativeai/types/discuss_types.py b/google/generativeai/types/discuss_types.py deleted file mode 100644 index 05ad262f3..000000000 --- a/google/generativeai/types/discuss_types.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -"""Type definitions for the discuss service.""" - -import abc -import dataclasses -from typing import Any, Dict, Union, Iterable, Optional, Tuple, List -from typing_extensions import TypedDict - -from google.generativeai import protos -from google.generativeai import string_utils - -from google.generativeai.types import palm_safety_types -from google.generativeai.types import citation_types - - -__all__ = [ - "MessageDict", - "MessageOptions", - "MessagesOptions", - "ExampleDict", - "ExampleOptions", - "ExamplesOptions", - "MessagePromptDict", - "MessagePromptOptions", - "ResponseDict", - "ChatResponse", - "AuthorError", -] - - -class TokenCount(TypedDict): - token_count: int - - -class MessageDict(TypedDict): - """A dict representation of a `protos.Message`.""" - - author: str - content: str - citation_metadata: Optional[citation_types.CitationMetadataDict] - - -MessageOptions = Union[str, MessageDict, protos.Message] -MESSAGE_OPTIONS = (str, dict, protos.Message) - -MessagesOptions = Union[ - MessageOptions, - Iterable[MessageOptions], -] -MESSAGES_OPTIONS = (MESSAGE_OPTIONS, Iterable) - - -class ExampleDict(TypedDict): - """A dict representation of a `protos.Example`.""" - - input: MessageOptions - output: MessageOptions - - -ExampleOptions = Union[ - Tuple[MessageOptions, MessageOptions], - Iterable[MessageOptions], - ExampleDict, - protos.Example, -] -EXAMPLE_OPTIONS = (protos.Example, dict, Iterable) -ExamplesOptions = Union[ExampleOptions, Iterable[ExampleOptions]] - - -class MessagePromptDict(TypedDict, total=False): - """A dict representation of a `protos.MessagePrompt`.""" - - context: str - examples: ExamplesOptions - messages: MessagesOptions - - -MessagePromptOptions = Union[ - str, - protos.Message, - Iterable[Union[str, protos.Message]], - MessagePromptDict, - protos.MessagePrompt, -] -MESSAGE_PROMPT_KEYS = {"context", "examples", "messages"} - - 
-class ResponseDict(TypedDict): - """A dict representation of a `protos.GenerateMessageResponse`.""" - - messages: List[MessageDict] - candidates: List[MessageDict] - - -@string_utils.prettyprint -@dataclasses.dataclass(init=False) -class ChatResponse(abc.ABC): - """A chat response from the model. - - * Use `response.last` (settable) for easy access to the text of the last response. - (`messages[-1]['content']`) - * Use `response.messages` to access the message history (including `.last`). - * Use `response.candidates` to access all the responses generated by the model. - - Other attributes are just saved from the arguments to `genai.chat`, so you - can easily continue a conversation: - - ``` - import google.generativeai as genai - - genai.configure(api_key=os.environ['GEMINI_API_KEY']) - - response = genai.chat(messages=["Hello."]) - print(response.last) # 'Hello! What can I help you with?' - response.reply("Can you tell me a joke?") - ``` - - See `genai.chat` for more details. - - Attributes: - candidates: A list of candidate responses from the model. - - The top candidate is appended to the `messages` field. - - This list will contain a *maximum* of `candidate_count` candidates. - It may contain fewer (duplicates are dropped), it will contain at least one. - - Note: The `temperature` field affects the variability of the responses. Low - temperatures will return few candidates. Setting `temperature=0` is deterministic, - so it will only ever return one candidate. - filters: This indicates which `types.SafetyCategory`(s) blocked a - candidate from this response, the lowest `types.HarmProbability` - that triggered a block, and the `types.HarmThreshold` setting for that category. - This indicates the smallest change to the `types.SafetySettings` that would be - necessary to unblock at least 1 response. - - The blocking is configured by the `types.SafetySettings` in the request (or the - default `types.SafetySettings` of the API). 
- messages: Contains all the `messages` that were passed when the model was called, - plus the top `candidate` message. - model: The model name. - context: Text that should be provided to the model first, to ground the response. - examples: Examples of what the model should generate. - messages: A snapshot of the conversation history sorted chronologically. - temperature: Controls the randomness of the output. Must be positive. - candidate_count: The **maximum** number of generated response messages to return. - top_k: The maximum number of tokens to consider when sampling. - top_p: The maximum cumulative probability of tokens to consider when sampling. - - """ - - model: str - context: str - examples: List[ExampleDict] - messages: List[Optional[MessageDict]] - temperature: Optional[float] - candidate_count: Optional[int] - candidates: List[MessageDict] - filters: List[palm_safety_types.ContentFilterDict] - top_p: Optional[float] = None - top_k: Optional[float] = None - - @property - @abc.abstractmethod - def last(self) -> Optional[str]: - """A settable property that provides simple access to the last response string - - A shortcut for `response.messages[0]['content']`. - """ - pass - - def to_dict(self) -> Dict[str, Any]: - result = { - "model": self.model, - "context": self.context, - "examples": self.examples, - "messages": self.messages, - "temperature": self.temperature, - "candidate_count": self.candidate_count, - "top_p": self.top_p, - "top_k": self.top_k, - "candidates": self.candidates, - } - return result - - @abc.abstractmethod - def reply(self, message: MessageOptions) -> "ChatResponse": - "Add a message to the conversation, and get the model's response." 
- pass - - -class AuthorError(Exception): - """Raised by the `chat` (or `reply`) functions when the author list can't be normalized.""" - - pass diff --git a/google/generativeai/types/text_types.py b/google/generativeai/types/text_types.py index 61804fcaa..e84a7e715 100644 --- a/google/generativeai/types/text_types.py +++ b/google/generativeai/types/text_types.py @@ -21,55 +21,12 @@ from typing_extensions import TypedDict from google.generativeai import string_utils -from google.generativeai.types import palm_safety_types from google.generativeai.types import citation_types -__all__ = ["Completion"] - - -class TokenCount(TypedDict): - token_count: int - - class EmbeddingDict(TypedDict): embedding: list[float] class BatchEmbeddingDict(TypedDict): embedding: list[list[float]] - - -class TextCompletion(TypedDict, total=False): - output: str - safety_ratings: List[palm_safety_types.SafetyRatingDict | None] - citation_metadata: citation_types.CitationMetadataDict | None - - -@string_utils.prettyprint -@dataclasses.dataclass(init=False) -class Completion(abc.ABC): - """The result returned by `generativeai.generate_text`. - - Use `GenerateTextResponse.candidates` to access all the completions generated by the model. - - Attributes: - candidates: A list of candidate text completions generated by the model. - result: The output of the first candidate, - filters: Indicates the reasons why content may have been blocked. - See `types.BlockedReason`. - safety_feedback: Indicates which safety settings blocked content in this result. 
- """ - - candidates: List[TextCompletion] - result: str | None - filters: List[palm_safety_types.ContentFilterDict | None] - safety_feedback: List[palm_safety_types.SafetyFeedbackDict | None] - - def to_dict(self) -> Dict[str, Any]: - result = { - "candidates": self.candidates, - "filters": self.filters, - "safety_feedback": self.safety_feedback, - } - return result diff --git a/tests/test_client.py b/tests/test_client.py index 0cc3e05eb..9162c3d75 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -58,11 +58,17 @@ def test_api_key_and_client_options(self): self.assertEqual(actual_client_opts.api_endpoint, "web.site") @parameterized.parameters( - client.get_default_discuss_client, - client.get_default_text_client, - client.get_default_discuss_async_client, + client.get_default_cache_client, + client.get_default_file_client, + client.get_default_file_async_client, + client.get_default_generative_client, + client.get_default_generative_async_client, client.get_default_model_client, client.get_default_operations_client, + client.get_default_retriever_client, + client.get_default_retriever_async_client, + client.get_default_permission_client, + client.get_default_permission_async_client, ) @mock.patch.dict(os.environ, {"GOOGLE_API_KEY": "AIzA_env"}) def test_configureless_client_with_key(self, factory_fn): @@ -76,7 +82,7 @@ class DummyClient: def __init__(self, *args, **kwargs): pass - def generate_text(self, metadata=None): + def generate_content(self, metadata=None): self.metadata = metadata not_a_function = 7 @@ -92,26 +98,26 @@ def static(): def classm(cls): cls.called_classm = True - @mock.patch.object(glm, "TextServiceClient", DummyClient) + @mock.patch.object(glm, "GenerativeServiceClient", DummyClient) def test_default_metadata(self): # The metadata wrapper injects this argument. 
metadata = [("hello", "world")] client.configure(default_metadata=metadata) - text_client = client.get_default_text_client() - text_client.generate_text() + generative_client = client.get_default_generative_client() + generative_client.generate_content() - self.assertEqual(metadata, text_client.metadata) + self.assertEqual(metadata, generative_client.metadata) - self.assertEqual(text_client.not_a_function, ClientTests.DummyClient.not_a_function) + self.assertEqual(generative_client.not_a_function, ClientTests.DummyClient.not_a_function) # Since these don't have a metadata arg, they'll fail if the wrapper is applied. - text_client._hidden() - self.assertTrue(text_client.called_hidden) + generative_client._hidden() + self.assertTrue(generative_client.called_hidden) - text_client.static() + generative_client.static() - text_client.classm() + generative_client.classm() self.assertTrue(ClientTests.DummyClient.called_classm) def test_same_config(self): diff --git a/tests/test_discuss.py b/tests/test_discuss.py deleted file mode 100644 index 4e54cf754..000000000 --- a/tests/test_discuss.py +++ /dev/null @@ -1,386 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import copy - -import unittest.mock - -from google.generativeai import protos - -from google.generativeai import discuss -from google.generativeai import client -import google.generativeai as genai -from google.generativeai.types import palm_safety_types - -from absl.testing import absltest -from absl.testing import parameterized - -# TODO: replace returns with 'assert' statements - - -class UnitTests(parameterized.TestCase): - def setUp(self): - self.client = unittest.mock.MagicMock() - - client._client_manager.clients["discuss"] = self.client - - self.observed_request = None - - self.mock_response = protos.GenerateMessageResponse( - candidates=[ - protos.Message(content="a", author="1"), - protos.Message(content="b", author="1"), - protos.Message(content="c", author="1"), - ], - ) - - def fake_generate_message( - request: protos.GenerateMessageRequest, - **kwargs, - ) -> protos.GenerateMessageResponse: - self.observed_request = request - response = copy.copy(self.mock_response) - response.messages = request.prompt.messages - return response - - self.client.generate_message = fake_generate_message - - @parameterized.named_parameters( - ["string", "Hello", ""], - ["dict", {"content": "Hello"}, ""], - ["dict_author", {"content": "Hello", "author": "me"}, "me"], - ["proto", protos.Message(content="Hello"), ""], - ["proto_author", protos.Message(content="Hello", author="me"), "me"], - ) - def test_make_message(self, message, author): - x = discuss._make_message(message) - self.assertIsInstance(x, protos.Message) - self.assertEqual("Hello", x.content) - self.assertEqual(author, x.author) - - @parameterized.named_parameters( - ["string", "Hello", ["Hello"]], - ["dict", {"content": "Hello"}, ["Hello"]], - ["proto", protos.Message(content="Hello"), ["Hello"]], - [ - "list", - ["hello0", {"content": "hello1"}, protos.Message(content="hello2")], - ["hello0", "hello1", "hello2"], - ], - ) - def test_make_messages(self, messages, expected_contents): - messages = 
discuss._make_messages(messages) - for expected, message in zip(expected_contents, messages): - self.assertEqual(expected, message.content) - - @parameterized.named_parameters( - ["tuple", ("hello", {"content": "goodbye"})], - ["iterable", iter(["hello", "goodbye"])], - ["dict", {"input": "hello", "output": "goodbye"}], - [ - "proto", - protos.Example( - input=protos.Message(content="hello"), - output=protos.Message(content="goodbye"), - ), - ], - ) - def test_make_example(self, example): - x = discuss._make_example(example) - self.assertIsInstance(x, protos.Example) - self.assertEqual("hello", x.input.content) - self.assertEqual("goodbye", x.output.content) - return - - @parameterized.named_parameters( - [ - "messages", - [ - "Hi", - {"content": "Hello!"}, - "what's your name?", - protos.Message(content="Dave, what's yours"), - ], - ], - [ - "examples", - [ - ("Hi", "Hello!"), - { - "input": "what's your name?", - "output": {"content": "Dave, what's yours"}, - }, - ], - ], - ) - def test_make_examples(self, examples): - examples = discuss._make_examples(examples) - self.assertLen(examples, 2) - self.assertEqual(examples[0].input.content, "Hi") - self.assertEqual(examples[0].output.content, "Hello!") - self.assertEqual(examples[1].input.content, "what's your name?") - self.assertEqual(examples[1].output.content, "Dave, what's yours") - - return - - def test_make_examples_from_example(self): - ex_dict = {"input": "hello", "output": "meow!"} - example = discuss._make_example(ex_dict) - examples1 = discuss._make_examples(ex_dict) - examples2 = discuss._make_examples(discuss._make_example(ex_dict)) - - self.assertEqual(example, examples1[0]) - self.assertEqual(example, examples2[0]) - - @parameterized.named_parameters( - ["str", "hello"], - ["message", protos.Message(content="hello")], - ["messages", ["hello"]], - ["dict", {"messages": "hello"}], - ["dict2", {"messages": ["hello"]}], - ["proto", protos.MessagePrompt(messages=[protos.Message(content="hello")])], - ) - 
def test_make_message_prompt_from_messages(self, prompt): - x = discuss._make_message_prompt(prompt) - self.assertIsInstance(x, protos.MessagePrompt) - self.assertEqual(x.messages[0].content, "hello") - return - - @parameterized.named_parameters( - [ - "dict", - [ - { - "context": "you are a cat", - "examples": ["are you hungry?", "meow!"], - "messages": "hello", - } - ], - {}, - ], - [ - "kwargs", - [], - { - "context": "you are a cat", - "examples": ["are you hungry?", "meow!"], - "messages": "hello", - }, - ], - [ - "proto", - [ - protos.MessagePrompt( - context="you are a cat", - examples=[ - protos.Example( - input=protos.Message(content="are you hungry?"), - output=protos.Message(content="meow!"), - ) - ], - messages=[protos.Message(content="hello")], - ) - ], - {}, - ], - ) - def test_make_message_prompt_from_prompt(self, args, kwargs): - x = discuss._make_message_prompt(*args, **kwargs) - self.assertIsInstance(x, protos.MessagePrompt) - self.assertEqual(x.context, "you are a cat") - self.assertEqual(x.examples[0].input.content, "are you hungry?") - self.assertEqual(x.examples[0].output.content, "meow!") - self.assertEqual(x.messages[0].content, "hello") - - def test_make_generate_message_request_nested( - self, - ): - request0 = discuss._make_generate_message_request( - **{ - "model": "models/Dave", - "context": "you are a cat", - "examples": ["hello", "meow", "are you hungry?", "meow!"], - "messages": "Please catch that mouse.", - "temperature": 0.2, - "candidate_count": 7, - } - ) - request1 = discuss._make_generate_message_request( - **{ - "model": "models/Dave", - "prompt": { - "context": "you are a cat", - "examples": ["hello", "meow", "are you hungry?", "meow!"], - "messages": "Please catch that mouse.", - }, - "temperature": 0.2, - "candidate_count": 7, - } - ) - - self.assertIsInstance(request0, protos.GenerateMessageRequest) - self.assertIsInstance(request1, protos.GenerateMessageRequest) - self.assertEqual(request0, request1) - - 
@parameterized.parameters( - {"prompt": {}, "context": "You are a cat."}, - { - "prompt": {"context": "You are a cat."}, - "examples": ["hello", "meow"], - }, - {"prompt": {"examples": ["hello", "meow"]}, "messages": "hello"}, - ) - def test_make_generate_message_request_flat_prompt_conflict( - self, - context=None, - examples=None, - messages=None, - prompt=None, - ): - with self.assertRaises(ValueError): - x = discuss._make_generate_message_request( - model="test", - context=context, - examples=examples, - messages=messages, - prompt=prompt, - ) - - @parameterized.parameters( - {"kwargs": {"context": "You are a cat."}}, - {"kwargs": {"messages": "hello"}}, - {"kwargs": {"examples": [["a", "b"], ["c", "d"]]}}, - { - "kwargs": { - "messages": ["hello"], - "examples": [["a", "b"], ["c", "d"]], - } - }, - ) - def test_reply(self, kwargs): - response = genai.chat(**kwargs) - first_messages = response.messages - - self.assertEqual("a", response.last) - self.assertEqual( - [ - {"author": "1", "content": "a"}, - {"author": "1", "content": "b"}, - {"author": "1", "content": "c"}, - ], - response.candidates, - ) - - response = response.reply("again") - - def test_receive_and_reply_with_filters(self): - self.mock_response = mock_response = protos.GenerateMessageResponse( - candidates=[protos.Message(content="a", author="1")], - filters=[ - protos.ContentFilter( - reason=palm_safety_types.BlockedReason.SAFETY, message="unsafe" - ), - protos.ContentFilter(reason=palm_safety_types.BlockedReason.OTHER), - ], - ) - response = discuss.chat(messages="do filters work?") - - filters = response.filters - self.assertLen(filters, 2) - self.assertIsInstance(filters[0]["reason"], palm_safety_types.BlockedReason) - self.assertEqual(filters[0]["reason"], palm_safety_types.BlockedReason.SAFETY) - self.assertEqual(filters[0]["message"], "unsafe") - - self.mock_response = protos.GenerateMessageResponse( - candidates=[protos.Message(content="a", author="1")], - filters=[ - 
protos.ContentFilter( - reason=palm_safety_types.BlockedReason.BLOCKED_REASON_UNSPECIFIED - ) - ], - ) - - response = response.reply("Does reply work?") - filters = response.filters - self.assertLen(filters, 1) - self.assertIsInstance(filters[0]["reason"], palm_safety_types.BlockedReason) - self.assertEqual( - filters[0]["reason"], - palm_safety_types.BlockedReason.BLOCKED_REASON_UNSPECIFIED, - ) - - def test_chat_citations(self): - self.mock_response = mock_response = protos.GenerateMessageResponse( - candidates=[ - { - "content": "Hello google!", - "author": "1", - "citation_metadata": { - "citation_sources": [ - { - "start_index": 6, - "end_index": 12, - "uri": "https://google.com", - } - ] - }, - } - ], - ) - - response = discuss.chat(messages="Do citations work?") - - self.assertEqual( - response.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"], - 6, - ) - - response = response.reply("What about a second time?") - - self.assertEqual( - response.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"], - 6, - ) - self.assertLen(response.messages, 4) - - def test_set_last(self): - response = discuss.chat(messages="Can you overwrite `.last`?") - response.last = "yes" - response = response.reply("glad to hear it!") - response.last = "Me too!" 
- self.assertEqual( - [msg["content"] for msg in response.messages], - [ - "Can you overwrite `.last`?", - "yes", - "glad to hear it!", - "Me too!", - ], - ) - - def test_generate_message_called_with_request_options(self): - self.client.generate_message = unittest.mock.MagicMock() - request = unittest.mock.ANY - request_options = {"timeout": 120} - - try: - genai.chat(**{"context": "You are a cat."}, request_options=request_options) - except AttributeError: - pass - - self.client.generate_message.assert_called_once_with(request, **request_options) - - -if __name__ == "__main__": - absltest.main() diff --git a/tests/test_discuss_async.py b/tests/test_discuss_async.py deleted file mode 100644 index d35d03525..000000000 --- a/tests/test_discuss_async.py +++ /dev/null @@ -1,85 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import sys -from typing import Any -import unittest - -from google.generativeai import protos - -from google.generativeai import discuss -from absl.testing import absltest -from absl.testing import parameterized - - -class AsyncTests(parameterized.TestCase, unittest.IsolatedAsyncioTestCase): - async def test_chat_async(self): - client = unittest.mock.AsyncMock() - - observed_request = None - - async def fake_generate_message( - request: protos.GenerateMessageRequest, - **kwargs, - ) -> protos.GenerateMessageResponse: - nonlocal observed_request - observed_request = request - return protos.GenerateMessageResponse( - candidates=[ - protos.Message( - author="1", - content="Why did the chicken cross the road?", - ) - ] - ) - - client.generate_message = fake_generate_message - - observed_response = await discuss.chat_async( - model="models/bard", - context="Example Prompt", - examples=[["Example from human", "Example response from AI"]], - messages=["Tell me a joke"], - temperature=0.75, - candidate_count=1, - client=client, - ) - - self.assertEqual( - observed_request, - protos.GenerateMessageRequest( - model="models/bard", - prompt=protos.MessagePrompt( - context="Example Prompt", - examples=[ - protos.Example( - input=protos.Message(content="Example from human"), - output=protos.Message(content="Example response from AI"), - ) - ], - messages=[protos.Message(author="0", content="Tell me a joke")], - ), - temperature=0.75, - candidate_count=1, - ), - ) - self.assertEqual( - observed_response.candidates, - [{"author": "1", "content": "Why did the chicken cross the road?"}], - ) - - -if __name__ == "__main__": - absltest.main() diff --git a/tests/test_text.py b/tests/test_text.py deleted file mode 100644 index 795c3dfcd..000000000 --- a/tests/test_text.py +++ /dev/null @@ -1,542 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the 
License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import copy -import math -from typing import Any -import unittest -import unittest.mock as mock - -from google.generativeai import protos - -from google.generativeai import text as text_service -from google.generativeai import client -from google.generativeai.types import palm_safety_types -from google.generativeai.types import model_types -from absl.testing import absltest -from absl.testing import parameterized - - -class UnitTests(parameterized.TestCase): - def setUp(self): - self.client = unittest.mock.MagicMock() - - client._client_manager.clients["text"] = self.client - client._client_manager.clients["model"] = self.client - - self.observed_requests = [] - - self.responses = {} - - def add_client_method(f): - name = f.__name__ - setattr(self.client, name, f) - return f - - @add_client_method - def generate_text( - request: protos.GenerateTextRequest, - **kwargs, - ) -> protos.GenerateTextResponse: - self.observed_requests.append(request) - return self.responses["generate_text"] - - @add_client_method - def embed_text( - request: protos.EmbedTextRequest, - **kwargs, - ) -> protos.EmbedTextResponse: - self.observed_requests.append(request) - return self.responses["embed_text"] - - @add_client_method - def batch_embed_text( - request: protos.EmbedTextRequest, - **kwargs, - ) -> protos.EmbedTextResponse: - self.observed_requests.append(request) - - return protos.BatchEmbedTextResponse( - embeddings=[protos.Embedding(value=[1, 2, 3])] * len(request.texts) - ) - - @add_client_method - def count_text_tokens( - request: 
protos.CountTextTokensRequest, - **kwargs, - ) -> protos.CountTextTokensResponse: - self.observed_requests.append(request) - return self.responses["count_text_tokens"] - - @add_client_method - def get_tuned_model(name) -> protos.TunedModel: - request = protos.GetTunedModelRequest(name=name) - self.observed_requests.append(request) - response = copy.copy(self.responses["get_tuned_model"]) - return response - - @parameterized.named_parameters( - [ - dict(testcase_name="string", prompt="Hello how are"), - ] - ) - def test_make_prompt(self, prompt): - x = text_service._make_text_prompt(prompt) - self.assertIsInstance(x, protos.TextPrompt) - self.assertEqual("Hello how are", x.text) - - @parameterized.named_parameters( - [ - dict(testcase_name="string", prompt="What are you"), - ] - ) - def test_make_generate_text_request(self, prompt): - x = text_service._make_generate_text_request(model="models/chat-bison-001", prompt=prompt) - self.assertEqual("models/chat-bison-001", x.model) - self.assertIsInstance(x, protos.GenerateTextRequest) - - @parameterized.named_parameters( - [ - dict( - testcase_name="basic_model", - model="models/chat-lamda-001", - text="What are you?", - ) - ] - ) - def test_generate_embeddings(self, model, text): - self.responses["embed_text"] = protos.EmbedTextResponse( - embedding=protos.Embedding(value=[1, 2, 3]) - ) - - emb = text_service.generate_embeddings(model=model, text=text) - - self.assertIsInstance(emb, dict) - self.assertEqual( - self.observed_requests[-1], protos.EmbedTextRequest(model=model, text=text) - ) - self.assertIsInstance(emb["embedding"][0], float) - - @parameterized.named_parameters( - [ - dict( - testcase_name="small-2", - model="models/chat-lamda-001", - text=["Who are you?", "Who am I?"], - ), - dict( - testcase_name="even-batch", - model="models/chat-lamda-001", - text=["Who are you?"] * 100, - ), - dict( - testcase_name="even-batch-plus-one", - model="models/chat-lamda-001", - text=["Who are you?"] * 101, - ), - dict( - 
testcase_name="odd-batch", - model="models/chat-lamda-001", - text=["Who are you?"] * 237, - ), - ] - ) - def test_generate_embeddings_batch(self, model, text): - emb = text_service.generate_embeddings(model=model, text=text) - - self.assertIsInstance(emb, dict) - - # Check first and last requests. - self.assertEqual(self.observed_requests[-1].model, model) - self.assertEqual(self.observed_requests[-1].texts[-1], text[-1]) - self.assertEqual(self.observed_requests[0].texts[0], text[0]) - - # Check that the list has the right length. - self.assertIsInstance(emb["embedding"][0], list) - self.assertLen(emb["embedding"], len(text)) - - # Check that the right number of requests were sent. - self.assertLen( - self.observed_requests, - math.ceil(len(text) / text_service.EMBEDDING_MAX_BATCH_SIZE), - ) - - @parameterized.named_parameters( - [ - dict(testcase_name="basic", prompt="Why did the chicken cross the"), - dict( - testcase_name="temperature", - prompt="Why did the chicken cross the", - temperature=0.75, - ), - dict( - testcase_name="stop_list", - prompt="Why did the chicken cross the", - stop_sequences=["a", "b", "c"], - ), - dict( - testcase_name="count", - prompt="Why did the chicken cross the", - candidate_count=2, - ), - ] - ) - def test_generate_response(self, *, prompt, **kwargs): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[ - protos.TextCompletion(output=" road?"), - protos.TextCompletion(output=" bridge?"), - protos.TextCompletion(output=" river?"), - ] - ) - - complete = text_service.generate_text(prompt=prompt, **kwargs) - - self.assertEqual( - self.observed_requests[-1], - protos.GenerateTextRequest( - model="models/text-bison-001", prompt=protos.TextPrompt(text=prompt), **kwargs - ), - ) - - self.assertIsInstance(complete.result, str) - - self.assertEqual( - complete.candidates, - [ - {"output": " road?", "safety_ratings": []}, - {"output": " bridge?", "safety_ratings": []}, - {"output": " river?", "safety_ratings": 
[]}, - ], - ) - - def test_stop_string(self): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[ - protos.TextCompletion(output="Hello world?"), - protos.TextCompletion(output="Hell!"), - protos.TextCompletion(output="I'm going to stop"), - ] - ) - complete = text_service.generate_text(prompt="Hello", stop_sequences="stop") - - self.assertEqual( - self.observed_requests[-1], - protos.GenerateTextRequest( - model="models/text-bison-001", - prompt=protos.TextPrompt(text="Hello"), - stop_sequences=["stop"], - ), - ) - # Just make sure it made it into the request object. - self.assertEqual(self.observed_requests[-1].stop_sequences, ["stop"]) - - @parameterized.named_parameters( - [ - dict( - testcase_name="basic", - safety_settings=[ - { - "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_NONE, - }, - { - "category": palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE, - "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, - }, - ], - ), - dict( - testcase_name="strings", - safety_settings=[ - { - "category": "medical", - "threshold": "block_none", - }, - { - "category": "violent", - "threshold": "low", - }, - ], - ), - dict( - testcase_name="flat", - safety_settings={"medical": "block_none", "sex": "low"}, - ), - dict( - testcase_name="mixed", - safety_settings={ - "medical": palm_safety_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE, - palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE: 1, - }, - ), - ] - ) - def test_safety_settings(self, safety_settings): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[ - protos.TextCompletion(output="No"), - ] - ) - # This test really just checks that the safety_settings get converted to a proto. 
- result = text_service.generate_text( - prompt="Say something wicked.", safety_settings=safety_settings - ) - - self.assertEqual( - self.observed_requests[-1].safety_settings[0].category, - palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - ) - - def test_filters(self): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[{"output": "hello"}], - filters=[ - { - "reason": palm_safety_types.BlockedReason.SAFETY, - "message": "not safe", - } - ], - ) - - response = text_service.generate_text(prompt="do filters work?") - self.assertIsInstance(response.filters[0]["reason"], palm_safety_types.BlockedReason) - self.assertEqual(response.filters[0]["reason"], palm_safety_types.BlockedReason.SAFETY) - - def test_safety_feedback(self): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[{"output": "hello"}], - safety_feedback=[ - { - "rating": { - "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - "probability": palm_safety_types.HarmProbability.HIGH, - }, - "setting": { - "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_NONE, - }, - } - ], - ) - - response = text_service.generate_text(prompt="does safety feedback work?") - self.assertIsInstance( - response.safety_feedback[0]["rating"]["probability"], - palm_safety_types.HarmProbability, - ) - self.assertEqual( - response.safety_feedback[0]["rating"]["probability"], - palm_safety_types.HarmProbability.HIGH, - ) - - self.assertIsInstance( - response.safety_feedback[0]["setting"]["category"], - protos.HarmCategory, - ) - self.assertEqual( - response.safety_feedback[0]["setting"]["category"], - palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - ) - - def test_candidate_safety_feedback(self): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[ - { - "output": "hello", - "safety_ratings": [ - { - "category": 
palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - "probability": palm_safety_types.HarmProbability.HIGH, - }, - { - "category": palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE, - "probability": palm_safety_types.HarmProbability.LOW, - }, - ], - } - ] - ) - - result = text_service.generate_text(prompt="Write a story from the ER.") - self.assertIsInstance( - result.candidates[0]["safety_ratings"][0]["category"], - protos.HarmCategory, - ) - self.assertEqual( - result.candidates[0]["safety_ratings"][0]["category"], - palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL, - ) - - self.assertIsInstance( - result.candidates[0]["safety_ratings"][0]["probability"], - palm_safety_types.HarmProbability, - ) - self.assertEqual( - result.candidates[0]["safety_ratings"][0]["probability"], - palm_safety_types.HarmProbability.HIGH, - ) - - def test_candidate_citations(self): - self.responses["generate_text"] = protos.GenerateTextResponse( - candidates=[ - { - "output": "Hello Google!", - "citation_metadata": { - "citation_sources": [ - { - "start_index": 6, - "end_index": 12, - "uri": "https://google.com", - } - ] - }, - } - ] - ) - result = text_service.generate_text(prompt="Hi my name is Google") - self.assertEqual( - result.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"], - 6, - ) - - @parameterized.named_parameters( - [ - dict(testcase_name="base-name", model="models/text-bison-001"), - dict(testcase_name="tuned-name", model="tunedModels/bipedal-pangolin-001"), - dict( - testcase_name="model", - model=model_types.Model( - name="models/text-bison-001", - base_model_id="text-bison-001", - version="001", - display_name="🦬", - description="🦬🦬🦬🦬🦬🦬🦬🦬🦬🦬🦬", - input_token_limit=8000, - output_token_limit=4000, - supported_generation_methods=["GenerateText"], - ), - ), - dict( - testcase_name="tuned_model", - model=model_types.TunedModel( - name="tunedModels/bipedal-pangolin-001", - base_model="models/text-bison-001", - ), - ), - dict( - 
testcase_name="protos.model", - model=protos.Model( - name="models/text-bison-001", - ), - ), - dict( - testcase_name="protos.tuned_model", - model=protos.TunedModel( - name="tunedModels/bipedal-pangolin-001", - base_model="models/text-bison-001", - ), - ), - dict( - testcase_name="protos.tuned_model_nested", - model=protos.TunedModel( - name="tunedModels/bipedal-pangolin-002", - tuned_model_source={ - "tuned_model": "tunedModels/bipedal-pangolin-002", - "base_model": "models/text-bison-001", - }, - ), - ), - ] - ) - def test_count_message_tokens(self, model): - self.responses["get_tuned_model"] = protos.TunedModel( - name="tunedModels/bipedal-pangolin-001", base_model="models/text-bison-001" - ) - self.responses["count_text_tokens"] = protos.CountTextTokensResponse(token_count=7) - - response = text_service.count_text_tokens(model, "Tell me a story about a magic backpack.") - self.assertEqual({"token_count": 7}, response) - - should_look_up_model = isinstance(model, str) and model.startswith("tunedModels/") - if should_look_up_model: - self.assertLen(self.observed_requests, 2) - self.assertEqual( - self.observed_requests[0], - protos.GetTunedModelRequest(name="tunedModels/bipedal-pangolin-001"), - ) - - def test_count_text_tokens_called_with_request_options(self): - self.client.count_text_tokens = unittest.mock.MagicMock() - request = unittest.mock.ANY - request_options = {"timeout": 120} - - try: - result = text_service.count_text_tokens( - model="models/", - prompt="", - request_options=request_options, - ) - except AttributeError: - pass - - self.client.count_text_tokens.assert_called_once_with(request, **request_options) - - def test_batch_embed_text_called_with_request_options(self): - self.client.batch_embed_text = unittest.mock.MagicMock() - request = unittest.mock.ANY - request_options = {"timeout": 120} - - try: - result = text_service.generate_embeddings( - model="models/", - text=["first", "second"], - request_options=request_options, - ) - except 
AttributeError: - pass - - self.client.batch_embed_text.assert_called_once_with(request, **request_options) - - def test_embed_text_called_with_request_options(self): - self.client.embed_text = unittest.mock.MagicMock() - request = unittest.mock.ANY - request_options = {"timeout": 120} - - try: - result = text_service.generate_embeddings( - model="models/", - text="", - request_options=request_options, - ) - except AttributeError: - pass - - self.client.embed_text.assert_called_once_with(request, **request_options) - - def test_generate_text_called_with_request_options(self): - self.client.generate_text = unittest.mock.MagicMock() - request = unittest.mock.ANY - request_options = {"timeout": 120} - - try: - result = text_service.generate_text(prompt="", request_options=request_options) - except AttributeError: - pass - - self.client.generate_text.assert_called_once_with(request, **request_options) - - -if __name__ == "__main__": - absltest.main() From 32b754f76eea2498a43ef3f9865bf0264ef84314 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 22 Aug 2024 19:43:06 -0700 Subject: [PATCH 40/90] remove exit_on_error (#521) Change-Id: Ic4015704546f113e920fd173fb7b827b280cfe31 --- google/generativeai/notebook/cmd_line_parser.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/google/generativeai/notebook/cmd_line_parser.py b/google/generativeai/notebook/cmd_line_parser.py index 7005bf600..9b8e84048 100644 --- a/google/generativeai/notebook/cmd_line_parser.py +++ b/google/generativeai/notebook/cmd_line_parser.py @@ -373,15 +373,10 @@ def _create_parser( epilog = "" # Commands - extra_args = {} - if sys.version_info[0:2] >= (3, 9): - extra_args["exit_on_error"] = False - parser = argument_parser.ArgumentParser( prog=system_name, description=description, epilog=epilog, - **extra_args, ) subparsers = parser.add_subparsers(dest="cmd") _create_run_parser( From e805b240610313e893cb82235f134945c50f844f Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 27 Aug 2024 
12:10:39 -0700 Subject: [PATCH 41/90] Expand PIL image support. (#523) * fixes#511 Change-Id: I7303becbbd6a9441c406a28e84ecc86d2fdd35bd * add gif support Change-Id: Ibc8e091c63d30626f78510156e8c024014dddcca * link to references Change-Id: I49b9ab206ade37a8a5535b21fa7fdf62a0c569d2 * describe logic Change-Id: I813e8569b91e01a0884b1c2ff75dfa84fcf4a609 * add test gif Change-Id: I1e4a62bb1e1fade244771027380b8a13f444885d * format Change-Id: Ifb93c70f8ad48dce0fd0921de0e9e117819dc55c --- google/generativeai/types/content_types.py | 25 ++++++++++++++++++++- tests/test_content.py | 15 +++++++++++++ tests/test_img.gif | Bin 0 -> 353 bytes 3 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 tests/test_img.gif diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py index e2e2b680d..71d1e9907 100644 --- a/google/generativeai/types/content_types.py +++ b/google/generativeai/types/content_types.py @@ -73,11 +73,34 @@ def pil_to_blob(img): + # When you load an image with PIL you get a subclass of PIL.Image + # The subclass knows what file type it was loaded from it has a `.format` class attribute + # and the `get_format_mimetype` method. Convert these back to the same file type. + # + # The base image class doesn't know its file type, it just knows its mode. + # RGBA converts to PNG easily, P[allet] converts to GIF, RGB to GIF. + # But for anything else I'm not going to bother mapping it out (for now) let's just convert to RGB and send it. + # + # References: + # - file formats: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html + # - image modes: https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes + bytesio = io.BytesIO() - if isinstance(img, PIL.PngImagePlugin.PngImageFile) or img.mode == "RGBA": + + get_mime = getattr(img, "get_format_mimetype", None) + if get_mime is not None: + # If the image is created from a file, convert back to the same file type. 
+ img.save(bytesio, format=img.format) + mime_type = img.get_format_mimetype() + elif img.mode == "RGBA": img.save(bytesio, format="PNG") mime_type = "image/png" + elif img.mode == "P": + img.save(bytesio, format="GIF") + mime_type = "image/gif" else: + if img.mode != "RGB": + img = img.convert("RGB") img.save(bytesio, format="JPEG") mime_type = "image/jpeg" bytesio.seek(0) diff --git a/tests/test_content.py b/tests/test_content.py index b52858bb8..da763dc33 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -35,6 +35,10 @@ TEST_JPG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.jpg" TEST_JPG_DATA = TEST_JPG_PATH.read_bytes() +TEST_GIF_PATH = HERE / "test_img.gif" +TEST_GIF_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.gif" +TEST_GIF_DATA = TEST_GIF_PATH.read_bytes() + # simple test function def datetime(): @@ -88,6 +92,17 @@ def test_jpg_to_blob(self, image): self.assertEqual(blob.mime_type, "image/jpeg") self.assertStartsWith(blob.data, b"\xff\xd8\xff\xe0\x00\x10JFIF") + @parameterized.named_parameters( + ["PIL", PIL.Image.open(TEST_GIF_PATH)], + ["P", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8)).convert("P")], + ["IPython", IPython.display.Image(filename=TEST_GIF_PATH)], + ) + def test_gif_to_blob(self, image): + blob = content_types.image_to_blob(image) + self.assertIsInstance(blob, protos.Blob) + self.assertEqual(blob.mime_type, "image/gif") + self.assertStartsWith(blob.data, b"GIF87a") + @parameterized.named_parameters( ["BlobDict", {"mime_type": "image/png", "data": TEST_PNG_DATA}], ["protos.Blob", protos.Blob(mime_type="image/png", data=TEST_PNG_DATA)], diff --git a/tests/test_img.gif b/tests/test_img.gif new file mode 100644 index 0000000000000000000000000000000000000000..66c81ac7a78f1580cc952a6e45011fab28dee529 GIT binary patch literal 353 zcmZvXy-EX75QWEG(fv`92r-HlYYIt`A~1yuimZYwiiIGkg-Em*tVG`+CULQ_h@I1I znbJpyAy`de8LSLGft}rTHnx73JBKr82JYS6i^H`GAAGon3>;qsBtTY&8HC7)ph8H5 
zz#5SN8Bj-zB;$aHm zik%czkGnw>SS_y@--Nf@@txzAPZzc>C%r}V!VdH;XRdOdK9B0nwj1nsD&Nhw$LZWg jd-!1`Cri1_gP(kV$1T;ahmH49%bBm94Z4M|ID++Gfh2%B literal 0 HcmV?d00001 From e0928fc0f239242f0cf5d69b6821188a8dac4e66 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 27 Aug 2024 12:11:00 -0700 Subject: [PATCH 42/90] Enum (#529) * try to support enum types Change-Id: I5141f751c4d6c578ef957aa8250cb26309ea9bd3 * format Change-Id: I9619654247f0f7230c8ba4c76035ad0ff9324fd4 * Be clear that test uses enum value. Change-Id: I03e319f2795c7c15f527316a145d021620936c57 * Add samples Change-Id: Ifc5e5b2039c9f0532d37386f6d7b136961943bac * Fix type annotations. Change-Id: I6b7b769cf0ba17fc7188518cdcec3085f59760b0 --- google/generativeai/responder.py | 2 +- google/generativeai/types/content_types.py | 7 ++- google/generativeai/types/generation_types.py | 6 +-- samples/controlled_generation.py | 51 ++++++++++++++++++- tests/test_content.py | 32 ++++++++++++ 5 files changed, 90 insertions(+), 8 deletions(-) diff --git a/google/generativeai/responder.py b/google/generativeai/responder.py index bb85167ad..dd388c6a6 100644 --- a/google/generativeai/responder.py +++ b/google/generativeai/responder.py @@ -116,7 +116,7 @@ def _generate_schema( inspect.Parameter.POSITIONAL_ONLY, ) } - parameters = pydantic.create_model(f.__name__, **fields_dict).schema() + parameters = pydantic.create_model(f.__name__, **fields_dict).model_json_schema() # Postprocessing # 4. 
Suppress unnecessary title generation: # * https://github.com/pydantic/pydantic/issues/1051 diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py index 71d1e9907..b925967c8 100644 --- a/google/generativeai/types/content_types.py +++ b/google/generativeai/types/content_types.py @@ -402,7 +402,7 @@ def _schema_for_function( def _build_schema(fname, fields_dict): - parameters = pydantic.create_model(fname, **fields_dict).schema() + parameters = pydantic.create_model(fname, **fields_dict).model_json_schema() defs = parameters.pop("$defs", {}) # flatten the defs for name, value in defs.items(): @@ -424,7 +424,10 @@ def _build_schema(fname, fields_dict): def unpack_defs(schema, defs): - properties = schema["properties"] + properties = schema.get("properties", None) + if properties is None: + return + for name, value in properties.items(): ref_key = value.get("$ref", None) if ref_key is not None: diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index d4bed8b86..84689a922 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -16,7 +16,6 @@ import collections import contextlib -import sys from collections.abc import Iterable, AsyncIterable, Mapping import dataclasses import itertools @@ -165,7 +164,7 @@ class GenerationConfig: top_p: float | None = None top_k: int | None = None response_mime_type: str | None = None - response_schema: protos.Schema | Mapping[str, Any] | None = None + response_schema: protos.Schema | Mapping[str, Any] | type | None = None GenerationConfigType = Union[protos.GenerationConfig, GenerationConfigDict, GenerationConfig] @@ -186,7 +185,8 @@ def _normalize_schema(generation_config): if not str(response_schema).startswith("list["): raise ValueError( f"Invalid input: Could not understand the type of '{response_schema}'. 
" - "Expected one of the following types: `int`, `float`, `str`, `bool`, `typing_extensions.TypedDict`, `dataclass`, or `list[...]`." + "Expected one of the following types: `int`, `float`, `str`, `bool`, `enum`, " + "`typing_extensions.TypedDict`, `dataclass` or `list[...]`." ) response_schema = content_types._schema_for_class(response_schema) diff --git a/samples/controlled_generation.py b/samples/controlled_generation.py index b0c269bb7..4942481f6 100644 --- a/samples/controlled_generation.py +++ b/samples/controlled_generation.py @@ -11,9 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. from absl.testing import absltest +import pathlib import google.generativeai as genai +media = pathlib.Path(__file__).parents[1] / "third_party" + class UnitTests(absltest.TestCase): def test_json_controlled_generation(self): @@ -22,6 +25,7 @@ def test_json_controlled_generation(self): class Recipe(typing.TypedDict): recipe_name: str + ingredients: list[str] model = genai.GenerativeModel("gemini-1.5-pro-latest") result = model.generate_content( @@ -36,14 +40,57 @@ class Recipe(typing.TypedDict): def test_json_no_schema(self): # [START json_no_schema] model = genai.GenerativeModel("gemini-1.5-pro-latest") - prompt = """List a few popular cookie recipes using this JSON schema: + prompt = """List a few popular cookie recipes in JSON format. 
+ + Use this JSON schema: - Recipe = {'recipe_name': str} + Recipe = {'recipe_name': str, 'ingredients': list[str]} Return: list[Recipe]""" result = model.generate_content(prompt) print(result) # [END json_no_schema] + def test_json_enum(self): + # [START json_enum] + import enum + + class Choice(enum.Enum): + PERCUSSION = "Percussion" + STRING = "String" + WOODWIND = "Woodwind" + BRASS = "Brass" + KEYBOARD = "Keyboard" + + model = genai.GenerativeModel("gemini-1.5-pro-latest") + + organ = genai.upload_file(media / "organ.jpg") + result = model.generate_content( + ["What kind of instrument is this:", organ], + generation_config=genai.GenerationConfig( + response_mime_type="application/json", response_schema=Choice + ), + ) + print(result) # "Keyboard" + # [END json_enum] + + def test_json_enum_raw(self): + # [START json_enum_raw] + model = genai.GenerativeModel("gemini-1.5-pro-latest") + + organ = genai.upload_file(media / "organ.jpg") + result = model.generate_content( + ["What kind of instrument is this:", organ], + generation_config=genai.GenerationConfig( + response_mime_type="application/json", + response_schema={ + "type": "STRING", + "enum": ["Percussion", "String", "Woodwind", "Brass", "Keyboard"], + }, + ), + ) + print(result) # "Keyboard" + # [END json_enum_raw] + if __name__ == "__main__": absltest.main() diff --git a/tests/test_content.py b/tests/test_content.py index da763dc33..dc62e997b 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import dataclasses +import enum import pathlib import typing_extensions from typing import Any, Union, Iterable @@ -69,6 +70,18 @@ class ADataClassWithList: a: list[int] +class Choices(enum.Enum): + A = "a" + B = "b" + C = "c" + D = "d" + + +@dataclasses.dataclass +class HasEnum: + choice: Choices + + class UnitTests(parameterized.TestCase): @parameterized.named_parameters( ["PIL", PIL.Image.open(TEST_PNG_PATH)], @@ -551,6 +564,25 @@ def b(): }, ), ], + ["enum", Choices, protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"])], + [ + "enum_list", + list[Choices], + protos.Schema( + type="ARRAY", + items=protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"]), + ), + ], + [ + "has_enum", + HasEnum, + protos.Schema( + type=protos.Type.OBJECT, + properties={ + "choice": protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"]) + }, + ), + ], ) def test_auto_schema(self, annotation, expected): def fun(a: annotation): From 4647e79bbde427342a3989c0b0ea2c75fdbaa968 Mon Sep 17 00:00:00 2001 From: Jaana Dogan Date: Fri, 6 Sep 2024 09:25:38 -0700 Subject: [PATCH 43/90] Avoid making recommendations about regions in README (#538) The region coverage is subject to change as we are rolling out AI Studio to more regions like the UK and EU. Users will be pointed to https://ai.google.dev/gemini-api/docs/available-regions if they are located in a region that is not yet supported. We won't be able to maintain outdated documents on READMEs moving forward, so please either don't mention some of the operational details that are subject to change or provide a link to the official docs. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 19db267de..f05edb77b 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ The Google AI Python SDK is the easiest way for Python developers to build with ## Get started with the Gemini API 1. Go to [Google AI Studio](https://aistudio.google.com/). 2. 
Login with your Google account. -3. [Create](https://aistudio.google.com/app/apikey) an API key. Note that in Europe the free tier is not available. +3. [Create](https://aistudio.google.com/app/apikey) an API key. 4. Try a Python SDK [quickstart](https://github.com/google-gemini/gemini-api-cookbook/blob/main/quickstarts/Prompting.ipynb) in the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/). 5. For detailed instructions, try the [Python SDK tutorial](https://ai.google.dev/tutorials/python_quickstart) on [ai.google.dev](https://ai.google.dev). From 836d31a890764d3234c4b562d5e3aa9693f7b0e5 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 9 Sep 2024 11:22:29 -0700 Subject: [PATCH 44/90] Add more enum samples. (#543) * Add more enum samples Change-Id: I743d5967cc1cc91576b8ddf5a60db1767d94508d * format Change-Id: I8f6f9389f1cae0a7c934217968d4e2e20bb9590e --- samples/controlled_generation.py | 69 ++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/samples/controlled_generation.py b/samples/controlled_generation.py index 4942481f6..78c422464 100644 --- a/samples/controlled_generation.py +++ b/samples/controlled_generation.py @@ -73,6 +73,34 @@ class Choice(enum.Enum): print(result) # "Keyboard" # [END json_enum] + def test_enum_in_json(self): + # [START enum_in_json] + import enum + from typing_extensions import TypedDict + + class Grade(enum.Enum): + A_PLUS = "a+" + A = "a" + B = "b" + C = "c" + D = "d" + F = "f" + + class Recipe(TypedDict): + recipe_name: str + grade: Grade + + model = genai.GenerativeModel("gemini-1.5-pro-latest") + + result = model.generate_content( + "List about 10 cookie recipes, grade them based on popularity", + generation_config=genai.GenerationConfig( + response_mime_type="application/json", response_schema=list[Recipe] + ), + ) + print(result) # [{"grade": "a+", "recipe_name": "Chocolate Chip Cookies"}, ...] 
+ # [END enum_in_json] + def test_json_enum_raw(self): # [START json_enum_raw] model = genai.GenerativeModel("gemini-1.5-pro-latest") @@ -91,6 +119,47 @@ def test_json_enum_raw(self): print(result) # "Keyboard" # [END json_enum_raw] + def test_x_enum(self): + # [START x_enum] + import enum + + class Choice(enum.Enum): + PERCUSSION = "Percussion" + STRING = "String" + WOODWIND = "Woodwind" + BRASS = "Brass" + KEYBOARD = "Keyboard" + + model = genai.GenerativeModel("gemini-1.5-pro-latest") + + organ = genai.upload_file(media / "organ.jpg") + result = model.generate_content( + ["What kind of instrument is this:", organ], + generation_config=genai.GenerationConfig( + response_mime_type="text/x.enum", response_schema=Choice + ), + ) + print(result) # "Keyboard" + # [END x_enum] + + def test_x_enum_raw(self): + # [START x_enum_raw] + model = genai.GenerativeModel("gemini-1.5-pro-latest") + + organ = genai.upload_file(media / "organ.jpg") + result = model.generate_content( + ["What kind of instrument is this:", organ], + generation_config=genai.GenerationConfig( + response_mime_type="text/x.enum", + response_schema={ + "type": "STRING", + "enum": ["Percussion", "String", "Woodwind", "Brass", "Keyboard"], + }, + ), + ) + print(result) # "Keyboard" + # [END x_enum_raw] + if __name__ == "__main__": absltest.main() From 97019c4059041c81f1d72bb60b4109820533b4f3 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 9 Sep 2024 11:43:46 -0700 Subject: [PATCH 45/90] Update controlled_generation.py (#544) --- samples/controlled_generation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/controlled_generation.py b/samples/controlled_generation.py index 78c422464..042209a72 100644 --- a/samples/controlled_generation.py +++ b/samples/controlled_generation.py @@ -139,7 +139,7 @@ class Choice(enum.Enum): response_mime_type="text/x.enum", response_schema=Choice ), ) - print(result) # "Keyboard" + print(result) # Keyboard # [END x_enum] def 
test_x_enum_raw(self): @@ -157,7 +157,7 @@ def test_x_enum_raw(self): }, ), ) - print(result) # "Keyboard" + print(result) # Keyboard # [END x_enum_raw] From 9407dcde5666ba58831227d3acf8bd0e3f3b4f81 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 9 Sep 2024 16:56:27 -0700 Subject: [PATCH 46/90] update version (#545) Change-Id: Ib87e6502d2c3f70757208a882b9f57f6ed49fa13 --- google/generativeai/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/generativeai/version.py b/google/generativeai/version.py index f454e309f..a5bd48443 100644 --- a/google/generativeai/version.py +++ b/google/generativeai/version.py @@ -14,4 +14,4 @@ # limitations under the License. from __future__ import annotations -__version__ = "0.7.2" +__version__ = "0.8.0" From f03ef2db8e62e775a14662822fc4324f9ca63746 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 10 Sep 2024 09:53:01 -0700 Subject: [PATCH 47/90] update generativelanguage version (#547) Change-Id: Iff0d88eef997ba3ea3ede835b148831aee1213fc --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b4b05e619..29841ba1d 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ def get_version(): release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-ai-generativelanguage==0.6.6", + "google-ai-generativelanguage==0.6.9", "google-api-core", "google-api-python-client", "google-auth>=2.15.0", # 2.15 adds API key auth support From 1ccbc5a7c5cf30ee9cab39906e53cdb41f79c18f Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 10 Sep 2024 21:54:49 -0700 Subject: [PATCH 48/90] Expand `.text` error descriptions. 
(#527) * Expand error descriptions for #170 Change-Id: I8599dafc9e5084a43f2ce482644d0e9e16b61061 * Fix test failure caused by upgrade to protobuf>=5.0.0 Change-Id: I16a3c1964284d16efb48073662901454d4e4a6a1 * Format Change-Id: Iee130a7e58f2cfbc1001808ac892f119338626eb --- google/generativeai/types/generation_types.py | 65 +++++++++++++++++-- tests/test_generative_models.py | 4 +- 2 files changed, 60 insertions(+), 9 deletions(-) diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index 84689a922..23e7fb1d8 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -412,14 +412,22 @@ def parts(self): """ candidates = self.candidates if not candidates: - raise ValueError( + msg = ( "Invalid operation: The `response.parts` quick accessor requires a single candidate, " - "but none were returned. Please check the `response.prompt_feedback` to determine if the prompt was blocked." + "but `response.candidates` is empty." ) + if self.prompt_feedback: + raise ValueError( + msg + "\nThis appears to be caused by a blocked prompt, " + f"see `response.prompt_feedback`: {self.prompt_feedback}" + ) + else: + raise ValueError(msg) + if len(candidates) > 1: raise ValueError( - "Invalid operation: The `response.parts` quick accessor requires a single candidate. " - "For multiple candidates, please use `result.candidates[index].text`." + "Invalid operation: The `response.parts` quick accessor retrieves the parts for a single candidate. " + "This response contains multiple candidates, please use `result.candidates[index].text`." ) parts = candidates[0].content.parts return parts @@ -433,10 +441,53 @@ def text(self): """ parts = self.parts if not parts: - raise ValueError( - "Invalid operation: The `response.text` quick accessor requires the response to contain a valid `Part`, " - "but none were returned. 
Please check the `candidate.safety_ratings` to determine if the response was blocked." + candidate = self.candidates[0] + + fr = candidate.finish_reason + FinishReason = protos.Candidate.FinishReason + + msg = ( + "Invalid operation: The `response.text` quick accessor requires the response to contain a valid " + "`Part`, but none were returned. The candidate's " + f"[finish_reason](https://ai.google.dev/api/generate-content#finishreason) is {fr}." ) + if candidate.finish_message: + msg += 'The `finish_message` is "{candidate.finish_message}".' + + if fr is FinishReason.FINISH_REASON_UNSPECIFIED: + raise ValueError(msg) + elif fr is FinishReason.STOP: + raise ValueError(msg) + elif fr is FinishReason.MAX_TOKENS: + raise ValueError(msg) + elif fr is FinishReason.SAFETY: + raise ValueError( + msg + f" The candidate's safety_ratings are: {candidate.safety_ratings}.", + candidate.safety_ratings, + ) + elif fr is FinishReason.RECITATION: + raise ValueError( + msg + " Meaning that the model was reciting from copyrighted material." + ) + elif fr is FinishReason.LANGUAGE: + raise ValueError(msg + " Meaning the response was using an unsupported language.") + elif fr is FinishReason.OTHER: + raise ValueError(msg) + elif fr is FinishReason.BLOCKLIST: + raise ValueError(msg) + elif fr is FinishReason.PROHIBITED_CONTENT: + raise ValueError(msg) + elif fr is FinishReason.SPII: + raise ValueError(msg + " SPII - Sensitive Personally Identifiable Information.") + elif fr is FinishReason.MALFORMED_FUNCTION_CALL: + raise ValueError( + msg + " Meaning that model generated a `FunctionCall` that was invalid. " + "Setting the " + "[Function calling mode](https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode) " + "to `ANY` can fix this because it enables constrained decoding." 
+ ) + else: + raise ValueError(msg) texts = [] for part in parts: diff --git a/tests/test_generative_models.py b/tests/test_generative_models.py index cccea9d48..79c1ac36f 100644 --- a/tests/test_generative_models.py +++ b/tests/test_generative_models.py @@ -1120,13 +1120,13 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self): "usage_metadata": {} }), ), - error= index: 0 - content { + error= content { parts { text: "abc" } } finish_reason: SAFETY + index: 0 citation_metadata { } """ From 90f8094cb972658ccb4de99fe282908c4a92af84 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Wed, 11 Sep 2024 14:19:49 -0700 Subject: [PATCH 49/90] Fix default_metadata for files API. (#548) * Fix default_metadata for files API. Change-Id: Ibba74ccda137fe4ac62525868e036a0a0d612348 * Pass the metadata through to the discovery URL Change-Id: If8f938a1026d46aafc617d1c0e95360c463f0ba0 * fix tests, getmembers doesn't return classmethod/staticmethod objects. Change-Id: I9449c6f3299f820fea4128cb598f39cf359cd9ef * format Change-Id: I4133647fbea3b6b995ed6edf56bd2a243c2131af * Add notes describing interaction of the discovery URL and API endpoint. Change-Id: Ibb8740fd548cc8a8a75b32508d6d05aa6e89f633 * note about default-metadata Change-Id: I9647c47718d7b6981692b484b9e9b2ed0f9a42cc * remove unused import Change-Id: Ie49216fef5eb13e202fd276fff62e7e20f03b062 --- google/generativeai/client.py | 41 +++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/google/generativeai/client.py b/google/generativeai/client.py index 01d0a003b..938018b96 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -2,9 +2,9 @@ import os import contextlib +import inspect import dataclasses import pathlib -import types from typing import Any, cast from collections.abc import Sequence import httplib2 @@ -30,6 +30,21 @@ __version__ = "0.0.0" USER_AGENT = "genai-py" + +#### Caution! 
#### +# - It would make sense for the discovery URL to respect the client_options.endpoint setting. +# - That would make testing Files on the staging server possible. +# - We tried fixing this once, but broke colab in the process because their endpoint didn't forward the discovery +# requests. https://github.com/google-gemini/generative-ai-python/pull/333 +# - Kaggle would have a similar problem (b/362278209). +# - I think their proxy would forward the discovery traffic. +# - But they don't need to intercept the files-service at all, and uploads of large files could overload them. +# - Do the scotty uploads go to the same domain? +# - If you do route the discovery call to kaggle, be sure to attach the default_metadata (they need it). +# - One solution to all this would be if configure could take overrides per service. +# - set client_options.endpoint, but use a different endpoint for file service? It's not clear how best to do that +# through the file service. +################## GENAI_API_DISCOVERY_URL = "https://generativelanguage.googleapis.com/$discovery/rest" @@ -50,7 +65,7 @@ def __init__(self, *args, **kwargs): self._discovery_api = None super().__init__(*args, **kwargs) - def _setup_discovery_api(self): + def _setup_discovery_api(self, metadata: dict | Sequence[tuple[str, str]] = ()): api_key = self._client_options.api_key if api_key is None: raise ValueError( @@ -61,6 +76,7 @@ def _setup_discovery_api(self): http=httplib2.Http(), postproc=lambda resp, content: (resp, content), uri=f"{GENAI_API_DISCOVERY_URL}?version=v1beta&key={api_key}", + headers=dict(metadata), ) response, content = request.execute() request.http.close() @@ -78,9 +94,10 @@ def create_file( name: str | None = None, display_name: str | None = None, resumable: bool = True, + metadata: Sequence[tuple[str, str]] = (), ) -> protos.File: if self._discovery_api is None: - self._setup_discovery_api() + self._setup_discovery_api(metadata) file = {} if name is not None: @@ -92,6 +109,8 @@ def 
create_file( filename=path, mimetype=mime_type, resumable=resumable ) request = self._discovery_api.media().upload(body={"file": file}, media_body=media) + for key, value in metadata: + request.headers[key] = value result = request.execute() return self.get_file({"name": result["file"]["name"]}) @@ -226,16 +245,14 @@ def make_client(self, name): def keep(name, f): if name.startswith("_"): return False - elif name == "create_file": - return False - elif not isinstance(f, types.FunctionType): - return False - elif isinstance(f, classmethod): + + if not callable(f): return False - elif isinstance(f, staticmethod): + + if "metadata" not in inspect.signature(f).parameters.keys(): return False - else: - return True + + return True def add_default_metadata_wrapper(f): def call(*args, metadata=(), **kwargs): @@ -244,7 +261,7 @@ def call(*args, metadata=(), **kwargs): return call - for name, value in cls.__dict__.items(): + for name, value in inspect.getmembers(cls): if not keep(name, value): continue f = getattr(client, name) From c0394466ff6e42f82d7c16f3800976b3da35e185 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 12 Sep 2024 09:52:03 -0700 Subject: [PATCH 50/90] update version (#554) Change-Id: Idb2f2006cdf4b37d7fc8c73112dba3a125edf175 --- google/generativeai/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/generativeai/version.py b/google/generativeai/version.py index a5bd48443..18d6452a5 100644 --- a/google/generativeai/version.py +++ b/google/generativeai/version.py @@ -14,4 +14,4 @@ # limitations under the License. from __future__ import annotations -__version__ = "0.8.0" +__version__ = "0.8.1" From 4f42118d3d4daf9addc1c18f7d6895f7cd74087d Mon Sep 17 00:00:00 2001 From: Mark McDonald Date: Fri, 13 Sep 2024 01:23:40 +0800 Subject: [PATCH 51/90] Ensure tests are runnable when standalone (#552) When we run tests internally that don't invoke the test class, they pass but don't run the tests. 
Our GitHub environment tests them fine but this change ensures they run in both environments. --- tests/test_helpers.py | 6 +++++- tests/test_protos.py | 5 +++++ tests/test_responder.py | 7 +++++-- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/test_helpers.py b/tests/test_helpers.py index f060caf88..5d9ec1c42 100644 --- a/tests/test_helpers.py +++ b/tests/test_helpers.py @@ -12,11 +12,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import pathlib import copy import collections from typing import Union +from absl.testing import absltest from absl.testing import parameterized from google.generativeai import protos @@ -81,3 +81,7 @@ def test_get_model(self, request_options, expected_timeout, expected_retry): self.assertEqual(self.observed_timeout[0], expected_timeout) self.assertEqual(str(self.observed_retry[0]), str(expected_retry)) + + +if __name__ == "__main__": + absltest.main() diff --git a/tests/test_protos.py b/tests/test_protos.py index 1b59b0c6e..8a76bd0e3 100644 --- a/tests/test_protos.py +++ b/tests/test_protos.py @@ -15,6 +15,7 @@ import pathlib import re +from absl.testing import absltest from absl.testing import parameterized ROOT = pathlib.Path(__file__).parent.parent @@ -32,3 +33,7 @@ def test_check_glm_imports(self): match, msg=f"Bad `glm.` usage, use `genai.protos` instead,\n in {fpath}", ) + + +if __name__ == "__main__": + absltest.main() diff --git a/tests/test_responder.py b/tests/test_responder.py index c075fc65a..d2818da8a 100644 --- a/tests/test_responder.py +++ b/tests/test_responder.py @@ -19,8 +19,7 @@ from absl.testing import parameterized from google.generativeai import protos from google.generativeai import responder -import IPython.display -import PIL.Image + HERE = pathlib.Path(__file__).parent TEST_PNG_PATH = HERE / "test_img.png" @@ -250,3 +249,7 @@ def fun(a: annotation): cfd 
= responder.FunctionDeclaration.from_function(fun) got = cfd.parameters.properties["a"] self.assertEqual(got, expected) + + +if __name__ == "__main__": + absltest.main() From 36e001ad92785667fa4e007c11db428b71048af2 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 23 Sep 2024 11:34:19 -0700 Subject: [PATCH 52/90] Add rev14 parameters and fixes. (#561) * Add rev14 parameters Change-Id: I16f2b1f5820a6cf867b9abb04ffd5c6e6d2d947b * Fix flakey repr test Change-Id: I89bcf1494cf72c6ee28f2b52d0345cbb40859862 * format Change-Id: I81cff23e9ce20cc20b4a0632d557c71f536fd485 * Use client preview Change-Id: I2d8a4ee2e9e4b6e00a16a9dac1136a2fa18d7a28 * Fix tests Change-Id: If8fbbba1966aa42601adec877e60d851d4f03b72 * Fix tuned model tests Change-Id: I5ace9222954be7d903ebbdabab9efc663fa79174 * Fix tests Change-Id: Ifa610965c5d6c38123080a7e16416ac325418285 * format Change-Id: I15fd5701dd5c4200461a32c968fa19e375403a7e * pytype Change-Id: I08f74d08c4e93bbfdf353370b5dd57d8bf86a637 * pytype Change-Id: If81b86c176008cd9a99e3b879fbd3af086ec2235 * 3.9 tests Change-Id: I13e66016327aae0b0f3274e941bc615f379e5669 --- google/generativeai/types/generation_types.py | 29 ++++++++-- google/generativeai/types/model_types.py | 5 +- setup.py | 2 +- tests/test_files.py | 12 +++-- tests/test_generation.py | 53 +++++++++++++------ tests/test_generative_models.py | 53 +++---------------- 6 files changed, 82 insertions(+), 72 deletions(-) diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index 23e7fb1d8..8bd0a7736 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -144,17 +144,27 @@ class GenerationConfig: Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `genai.get_model` function. - + seed: + Optional. Seed used in decoding. If not set, the request uses a randomly generated seed. response_mime_type: Optional. 
Output response mimetype of the generated candidate text. Supported mimetype: `text/plain`: (default) Text output. + `text/x-enum`: for use with a string-enum in `response_schema` `application/json`: JSON response in the candidates. response_schema: Optional. Specifies the format of the JSON requested if response_mime_type is `application/json`. + presence_penalty: + Optional. + frequency_penalty: + Optional. + response_logprobs: + Optional. If true, export the `logprobs` results in response. + logprobs: + Optional. Number of candidates of log probabilities to return at each step of decoding. """ candidate_count: int | None = None @@ -163,8 +173,13 @@ class GenerationConfig: temperature: float | None = None top_p: float | None = None top_k: int | None = None + seed: int | None = None response_mime_type: str | None = None response_schema: protos.Schema | Mapping[str, Any] | type | None = None + presence_penalty: float | None = None + frequency_penalty: float | None = None + response_logprobs: bool | None = None + logprobs: int | None = None GenerationConfigType = Union[protos.GenerationConfig, GenerationConfigDict, GenerationConfig] @@ -306,6 +321,7 @@ def _join_code_execution_result(result_1, result_2): def _join_candidates(candidates: Iterable[protos.Candidate]): + """Joins stream chunks of a single candidate.""" candidates = tuple(candidates) index = candidates[0].index # These should all be the same. 
@@ -321,6 +337,7 @@ def _join_candidates(candidates: Iterable[protos.Candidate]): def _join_candidate_lists(candidate_lists: Iterable[list[protos.Candidate]]): + """Joins stream chunks where each chunk is a list of candidate chunks.""" # Assuming that is a candidate ends, it is no longer returned in the list of # candidates and that's why candidates have an index candidates = collections.defaultdict(list) @@ -344,10 +361,15 @@ def _join_prompt_feedbacks( def _join_chunks(chunks: Iterable[protos.GenerateContentResponse]): chunks = tuple(chunks) + if "usage_metadata" in chunks[-1]: + usage_metadata = chunks[-1].usage_metadata + else: + usage_metadata = None + return protos.GenerateContentResponse( candidates=_join_candidate_lists(c.candidates for c in chunks), prompt_feedback=_join_prompt_feedbacks(c.prompt_feedback for c in chunks), - usage_metadata=chunks[-1].usage_metadata, + usage_metadata=usage_metadata, ) @@ -541,7 +563,8 @@ def __str__(self) -> str: _result = _result.replace("\n", "\n ") if self._error: - _error = f",\nerror=<{self._error.__class__.__name__}> {self._error}" + + _error = f",\nerror={repr(self._error)}" else: _error = "" diff --git a/google/generativeai/types/model_types.py b/google/generativeai/types/model_types.py index 03922a64e..ff66d6339 100644 --- a/google/generativeai/types/model_types.py +++ b/google/generativeai/types/model_types.py @@ -143,7 +143,9 @@ def idecode_time(parent: dict["str", Any], name: str): def decode_tuned_model(tuned_model: protos.TunedModel | dict["str", Any]) -> TunedModel: if isinstance(tuned_model, protos.TunedModel): - tuned_model = type(tuned_model).to_dict(tuned_model) # pytype: disable=attribute-error + tuned_model = type(tuned_model).to_dict( + tuned_model, including_default_value_fields=False + ) # pytype: disable=attribute-error tuned_model["state"] = to_tuned_model_state(tuned_model.pop("state", None)) base_model = tuned_model.pop("base_model", None) @@ -195,6 +197,7 @@ class TunedModel: create_time: 
datetime.datetime | None = None update_time: datetime.datetime | None = None tuning_task: TuningTask | None = None + reader_project_numbers: list[int] | None = None @property def permissions(self) -> permission_types.Permissions: diff --git a/setup.py b/setup.py index 29841ba1d..0575dcd28 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ def get_version(): release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-ai-generativelanguage==0.6.9", + "google-ai-generativelanguage@https://storage.googleapis.com/generativeai-downloads/preview/ai-generativelanguage-v1beta-py.tar.gz", "google-api-core", "google-api-python-client", "google-auth>=2.15.0", # 2.15 adds API key auth support diff --git a/tests/test_files.py b/tests/test_files.py index 063f1ce3a..cb48316bd 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -12,13 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations from google.generativeai.types import file_types import collections import datetime import os -from typing import Iterable, Union +from typing import Iterable, Sequence import pathlib import google @@ -37,12 +38,13 @@ def __init__(self, test): def create_file( self, - path: Union[str, pathlib.Path, os.PathLike], + path: str | pathlib.Path | os.PathLike, *, - mime_type: Union[str, None] = None, - name: Union[str, None] = None, - display_name: Union[str, None] = None, + mime_type: str | None = None, + name: str | None = None, + display_name: str | None = None, resumable: bool = True, + metadata: Sequence[tuple[str, str]] = (), ) -> protos.File: self.observed_requests.append( dict( diff --git a/tests/test_generation.py b/tests/test_generation.py index 0cc3bfd07..a1461e8b5 100644 --- a/tests/test_generation.py +++ b/tests/test_generation.py @@ -1,4 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ import inspect +import json import string import textwrap from typing_extensions import TypedDict @@ -22,6 +38,8 @@ class Person(TypedDict): class UnitTests(parameterized.TestCase): + maxDiff = None + @parameterized.named_parameters( [ "protos.GenerationConfig", @@ -416,12 +434,8 @@ def test_join_prompt_feedbacks(self): ], "role": "assistant", }, - "citation_metadata": {"citation_sources": []}, "index": 0, - "finish_reason": 0, - "safety_ratings": [], - "token_count": 0, - "grounding_attributions": [], + "citation_metadata": {}, }, { "content": { @@ -429,11 +443,7 @@ def test_join_prompt_feedbacks(self): "role": "assistant", }, "index": 1, - "citation_metadata": {"citation_sources": []}, - "finish_reason": 0, - "safety_ratings": [], - "token_count": 0, - "grounding_attributions": [], + "citation_metadata": {}, }, { "content": { @@ -458,17 +468,16 @@ def test_join_prompt_feedbacks(self): }, ] }, - "finish_reason": 0, - "safety_ratings": [], - "token_count": 0, - "grounding_attributions": [], }, ] def test_join_candidates(self): candidate_lists = [[protos.Candidate(c) for c in cl] for cl in self.CANDIDATE_LISTS] result = generation_types._join_candidate_lists(candidate_lists) - self.assertEqual(self.MERGED_CANDIDATES, [type(r).to_dict(r) for r in result]) + self.assertEqual( + self.MERGED_CANDIDATES, + [type(r).to_dict(r, including_default_value_fields=False) for r in result], + ) def test_join_chunks(self): chunks = [protos.GenerateContentResponse(candidates=cl) for cl in self.CANDIDATE_LISTS] @@ -480,6 +489,10 @@ def test_join_chunks(self): ], ) + chunks[-1].usage_metadata = protos.GenerateContentResponse.UsageMetadata( + prompt_token_count=5 + ) + result = generation_types._join_chunks(chunks) expected = protos.GenerateContentResponse( @@ -495,10 +508,18 @@ def test_join_chunks(self): } ], }, + "usage_metadata": {"prompt_token_count": 5}, }, ) - self.assertEqual(type(expected).to_dict(expected), type(result).to_dict(expected)) + expected = json.dumps( + 
type(expected).to_dict(expected, including_default_value_fields=False), indent=4 + ) + result = json.dumps( + type(result).to_dict(result, including_default_value_fields=False), indent=4 + ) + + self.assertEqual(expected, result) def test_generate_content_response_iterator_end_to_end(self): chunks = [protos.GenerateContentResponse(candidates=cl) for cl in self.CANDIDATE_LISTS] diff --git a/tests/test_generative_models.py b/tests/test_generative_models.py index 79c1ac36f..fa69099ba 100644 --- a/tests/test_generative_models.py +++ b/tests/test_generative_models.py @@ -935,8 +935,7 @@ def test_repr_for_streaming_start_to_finish(self): "citation_metadata": {} } ], - "prompt_feedback": {}, - "usage_metadata": {} + "prompt_feedback": {} }), )""" ) @@ -964,8 +963,7 @@ def test_repr_for_streaming_start_to_finish(self): "citation_metadata": {} } ], - "prompt_feedback": {}, - "usage_metadata": {} + "prompt_feedback": {} }), )""" ) @@ -998,10 +996,10 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self): } }), ), - error= prompt_feedback { + error=BlockedPromptException(prompt_feedback { block_reason: SAFETY } - """ + )""" ) self.assertEqual(expected, result) @@ -1056,11 +1054,10 @@ def no_throw(): "citation_metadata": {} } ], - "prompt_feedback": {}, - "usage_metadata": {} + "prompt_feedback": {} }), ), - error= """ + error=ValueError()""" ) self.assertEqual(expected, result) @@ -1095,43 +1092,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self): response = chat.send_message("hello2", stream=True) result = repr(response) - expected = textwrap.dedent( - """\ - response: - GenerateContentResponse( - done=True, - iterator=None, - result=protos.GenerateContentResponse({ - "candidates": [ - { - "content": { - "parts": [ - { - "text": "abc" - } - ] - }, - "finish_reason": "SAFETY", - "index": 0, - "citation_metadata": {} - } - ], - "prompt_feedback": {}, - "usage_metadata": {} - }), - ), - error= content { - parts { - text: "abc" - } - } - 
finish_reason: SAFETY - index: 0 - citation_metadata { - } - """ - ) - self.assertEqual(expected, result) + self.assertIn("StopCandidateException", result) def test_repr_for_multi_turn_chat(self): # Multi turn chat From 8f7f5cb46c4359a93f4eb4606c0c3b4224f37e4e Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 23 Sep 2024 22:56:17 -0700 Subject: [PATCH 53/90] Update to use google.generativelanguage 0.6.10. (#568) * use the new generativelanguage==6.10 Change-Id: Ib65e073ad61c810e5b4d3c01bce71fc45af188d3 * Update version Change-Id: I32c344c366cfa753230435d6b4576f3309fc968f * 0.6.10 Change-Id: I265b5a3ab6ce598ce4c6383c5b4dd065842276f3 --- google/generativeai/version.py | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/google/generativeai/version.py b/google/generativeai/version.py index 18d6452a5..f6bf29d13 100644 --- a/google/generativeai/version.py +++ b/google/generativeai/version.py @@ -14,4 +14,4 @@ # limitations under the License. from __future__ import annotations -__version__ = "0.8.1" +__version__ = "0.8.2" diff --git a/setup.py b/setup.py index 0575dcd28..7aac23e9c 100644 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ def get_version(): release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-ai-generativelanguage@https://storage.googleapis.com/generativeai-downloads/preview/ai-generativelanguage-v1beta-py.tar.gz", + "google-ai-generativelanguage==0.6.10", "google-api-core", "google-api-python-client", "google-auth>=2.15.0", # 2.15 adds API key auth support From 6c8dad12643dc19e80f7d1943125984ae845c6d8 Mon Sep 17 00:00:00 2001 From: Laurent Picard Date: Tue, 24 Sep 2024 20:29:53 +0200 Subject: [PATCH 54/90] fix: preserve quality and optimize transfer of prompt images (#570) * fix: preserve quality and optimize transfer of prompt images * Move numpy-images to their own test case. 
Change-Id: Ie6b02c7647487c1df9d4e70e9b8eed70dc8b8fe3 * Format with black Change-Id: I04550a89eed9bb21c0a8f6f9b6ab76b8b0f41270 --------- Co-authored-by: Mark Daoust --- google/generativeai/types/content_types.py | 68 ++++++++++------------ tests/test_content.py | 15 ++++- 2 files changed, 43 insertions(+), 40 deletions(-) diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py index b925967c8..531999f55 100644 --- a/google/generativeai/types/content_types.py +++ b/google/generativeai/types/content_types.py @@ -19,6 +19,7 @@ import io import inspect import mimetypes +import pathlib import typing from typing import Any, Callable, Union from typing_extensions import TypedDict @@ -30,7 +31,7 @@ if typing.TYPE_CHECKING: import PIL.Image - import PIL.PngImagePlugin + import PIL.ImageFile import IPython.display IMAGE_TYPES = (PIL.Image.Image, IPython.display.Image) @@ -38,7 +39,7 @@ IMAGE_TYPES = () try: import PIL.Image - import PIL.PngImagePlugin + import PIL.ImageFile IMAGE_TYPES = IMAGE_TYPES + (PIL.Image.Image,) except ImportError: @@ -72,46 +73,39 @@ ] -def pil_to_blob(img): - # When you load an image with PIL you get a subclass of PIL.Image - # The subclass knows what file type it was loaded from it has a `.format` class attribute - # and the `get_format_mimetype` method. Convert these back to the same file type. - # - # The base image class doesn't know its file type, it just knows its mode. - # RGBA converts to PNG easily, P[allet] converts to GIF, RGB to GIF. - # But for anything else I'm not going to bother mapping it out (for now) let's just convert to RGB and send it. 
- # - # References: - # - file formats: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html - # - image modes: https://pillow.readthedocs.io/en/stable/handbook/concepts.html#modes - - bytesio = io.BytesIO() - - get_mime = getattr(img, "get_format_mimetype", None) - if get_mime is not None: - # If the image is created from a file, convert back to the same file type. - img.save(bytesio, format=img.format) - mime_type = img.get_format_mimetype() - elif img.mode == "RGBA": - img.save(bytesio, format="PNG") - mime_type = "image/png" - elif img.mode == "P": - img.save(bytesio, format="GIF") - mime_type = "image/gif" - else: - if img.mode != "RGB": - img = img.convert("RGB") - img.save(bytesio, format="JPEG") - mime_type = "image/jpeg" - bytesio.seek(0) - data = bytesio.read() - return protos.Blob(mime_type=mime_type, data=data) +def _pil_to_blob(image: PIL.Image.Image) -> protos.Blob: + # If the image is a local file, return a file-based blob without any modification. + # Otherwise, return a lossless WebP blob (same quality with optimized size). 
+ def file_blob(image: PIL.Image.Image) -> protos.Blob | None: + if not isinstance(image, PIL.ImageFile.ImageFile) or image.filename is None: + return None + filename = str(image.filename) + if not pathlib.Path(filename).is_file(): + return None + + mime_type = image.get_format_mimetype() + image_bytes = pathlib.Path(filename).read_bytes() + + return protos.Blob(mime_type=mime_type, data=image_bytes) + + def webp_blob(image: PIL.Image.Image) -> protos.Blob: + # Reference: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#webp + image_io = io.BytesIO() + image.save(image_io, format="webp", lossless=True) + image_io.seek(0) + + mime_type = "image/webp" + image_bytes = image_io.read() + + return protos.Blob(mime_type=mime_type, data=image_bytes) + + return file_blob(image) or webp_blob(image) def image_to_blob(image) -> protos.Blob: if PIL is not None: if isinstance(image, PIL.Image.Image): - return pil_to_blob(image) + return _pil_to_blob(image) if IPython is not None: if isinstance(image, IPython.display.Image): diff --git a/tests/test_content.py b/tests/test_content.py index dc62e997b..52e78f349 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -83,9 +83,20 @@ class HasEnum: class UnitTests(parameterized.TestCase): + @parameterized.named_parameters( - ["PIL", PIL.Image.open(TEST_PNG_PATH)], ["RGBA", PIL.Image.fromarray(np.zeros([6, 6, 4], dtype=np.uint8))], + ["RGB", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8))], + ["P", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8)).convert("P")], + ) + def test_numpy_to_blob(self, image): + blob = content_types.image_to_blob(image) + self.assertIsInstance(blob, protos.Blob) + self.assertEqual(blob.mime_type, "image/webp") + self.assertStartsWith(blob.data, b"RIFF \x00\x00\x00WEBPVP8L") + + @parameterized.named_parameters( + ["PIL", PIL.Image.open(TEST_PNG_PATH)], ["IPython", IPython.display.Image(filename=TEST_PNG_PATH)], ) def test_png_to_blob(self, image): @@ -96,7 
+107,6 @@ def test_png_to_blob(self, image): @parameterized.named_parameters( ["PIL", PIL.Image.open(TEST_JPG_PATH)], - ["RGB", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8))], ["IPython", IPython.display.Image(filename=TEST_JPG_PATH)], ) def test_jpg_to_blob(self, image): @@ -107,7 +117,6 @@ def test_jpg_to_blob(self, image): @parameterized.named_parameters( ["PIL", PIL.Image.open(TEST_GIF_PATH)], - ["P", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8)).convert("P")], ["IPython", IPython.display.Image(filename=TEST_GIF_PATH)], ) def test_gif_to_blob(self, image): From d5103eb239145328bfc376f8730ffaf9108339c5 Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 24 Sep 2024 11:30:49 -0700 Subject: [PATCH 55/90] Search grounding (#558) * Updated tests and current progress on adding search grounding. * Update google/generativeai/types/content_types.py Co-authored-by: Mark Daoust * Update tests/test_content.py Co-authored-by: Mark Daoust * Update search grounding * update content_types * Update and add aditional test cases * update test case on empty_dictionary_with_dynamic_retrieval_config * Update test cases and _make_search_grounding * fix tests Change-Id: Ib9e19d78861da180f713e09ec93d366d5d7b5762 * Remove print statement * Fix tuned model tests Change-Id: I5ace9222954be7d903ebbdabab9efc663fa79174 * Fix tests Change-Id: Ifa610965c5d6c38123080a7e16416ac325418285 * format Change-Id: Iab48a9400d53f3cbdc5ca49c73df4f6a186a867b * fix typing Change-Id: If892b20ca29d1afb82c48ae1a49bef58e0421bab * Format Change-Id: I51a51150879adb3d4b6b00323e0d8eaf4c0b2515 --------- Co-authored-by: Mark Daoust --- google/generativeai/types/content_types.py | 104 +++++++++++++++++++-- tests/test_content.py | 78 ++++++++++++++-- 2 files changed, 169 insertions(+), 13 deletions(-) diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py index 531999f55..f3db610e1 100644 --- a/google/generativeai/types/content_types.py +++ 
b/google/generativeai/types/content_types.py @@ -72,6 +72,27 @@ "FunctionLibraryType", ] +Mode = protos.DynamicRetrievalConfig.Mode + +ModeOptions = Union[int, str, Mode] + +_MODE: dict[ModeOptions, Mode] = { + Mode.MODE_UNSPECIFIED: Mode.MODE_UNSPECIFIED, + 0: Mode.MODE_UNSPECIFIED, + "mode_unspecified": Mode.MODE_UNSPECIFIED, + "unspecified": Mode.MODE_UNSPECIFIED, + Mode.MODE_DYNAMIC: Mode.MODE_DYNAMIC, + 1: Mode.MODE_DYNAMIC, + "mode_dynamic": Mode.MODE_DYNAMIC, + "dynamic": Mode.MODE_DYNAMIC, +} + + +def to_mode(x: ModeOptions) -> Mode: + if isinstance(x, str): + x = x.lower() + return _MODE[x] + def _pil_to_blob(image: PIL.Image.Image) -> protos.Blob: # If the image is a local file, return a file-based blob without any modification. @@ -644,16 +665,54 @@ def _encode_fd(fd: FunctionDeclaration | protos.FunctionDeclaration) -> protos.F return fd.to_proto() +class DynamicRetrievalConfigDict(TypedDict): + mode: protos.DynamicRetrievalConfig.mode + dynamic_threshold: float + + +DynamicRetrievalConfig = Union[protos.DynamicRetrievalConfig, DynamicRetrievalConfigDict] + + +class GoogleSearchRetrievalDict(TypedDict): + dynamic_retrieval_config: DynamicRetrievalConfig + + +GoogleSearchRetrievalType = Union[protos.GoogleSearchRetrieval, GoogleSearchRetrievalDict] + + +def _make_google_search_retrieval(gsr: GoogleSearchRetrievalType): + if isinstance(gsr, protos.GoogleSearchRetrieval): + return gsr + elif isinstance(gsr, Mapping): + drc = gsr.get("dynamic_retrieval_config", None) + if drc is not None and isinstance(drc, Mapping): + mode = drc.get("mode", None) + if mode is not None: + mode = to_mode(mode) + gsr = gsr.copy() + gsr["dynamic_retrieval_config"]["mode"] = mode + return protos.GoogleSearchRetrieval(gsr) + else: + raise TypeError( + "Invalid input type. 
Expected an instance of `genai.GoogleSearchRetrieval`.\n" + f"However, received an object of type: {type(gsr)}.\n" + f"Object Value: {gsr}" + ) + + class Tool: - """A wrapper for `protos.Tool`, Contains a collection of related `FunctionDeclaration` objects.""" + """A wrapper for `protos.Tool`, Contains a collection of related `FunctionDeclaration` objects, + protos.CodeExecution object, and protos.GoogleSearchRetrieval object.""" def __init__( self, + *, function_declarations: Iterable[FunctionDeclarationType] | None = None, + google_search_retrieval: GoogleSearchRetrievalType | None = None, code_execution: protos.CodeExecution | None = None, ): # The main path doesn't use this but is seems useful. - if function_declarations: + if function_declarations is not None: self._function_declarations = [ _make_function_declaration(f) for f in function_declarations ] @@ -668,8 +727,14 @@ def __init__( self._function_declarations = [] self._index = {} + if google_search_retrieval is not None: + self._google_search_retrieval = _make_google_search_retrieval(google_search_retrieval) + else: + self._google_search_retrieval = None + self._proto = protos.Tool( function_declarations=[_encode_fd(fd) for fd in self._function_declarations], + google_search_retrieval=google_search_retrieval, code_execution=code_execution, ) @@ -677,6 +742,10 @@ def __init__( def function_declarations(self) -> list[FunctionDeclaration | protos.FunctionDeclaration]: return self._function_declarations + @property + def google_search_retrieval(self) -> protos.GoogleSearchRetrieval: + return self._google_search_retrieval + @property def code_execution(self) -> protos.CodeExecution: return self._proto.code_execution @@ -705,7 +774,7 @@ class ToolDict(TypedDict): ToolType = Union[ - Tool, protos.Tool, ToolDict, Iterable[FunctionDeclarationType], FunctionDeclarationType + str, Tool, protos.Tool, ToolDict, Iterable[FunctionDeclarationType], FunctionDeclarationType ] @@ -717,9 +786,23 @@ def _make_tool(tool: 
ToolType) -> Tool: code_execution = tool.code_execution else: code_execution = None - return Tool(function_declarations=tool.function_declarations, code_execution=code_execution) + + if "google_search_retrieval" in tool: + google_search_retrieval = tool.google_search_retrieval + else: + google_search_retrieval = None + + return Tool( + function_declarations=tool.function_declarations, + google_search_retrieval=google_search_retrieval, + code_execution=code_execution, + ) elif isinstance(tool, dict): - if "function_declarations" in tool or "code_execution" in tool: + if ( + "function_declarations" in tool + or "google_search_retrieval" in tool + or "code_execution" in tool + ): return Tool(**tool) else: fd = tool @@ -727,10 +810,17 @@ def _make_tool(tool: ToolType) -> Tool: elif isinstance(tool, str): if tool.lower() == "code_execution": return Tool(code_execution=protos.CodeExecution()) + # Check to see if one of the mode enums matches + elif tool.lower() == "google_search_retrieval": + return Tool(google_search_retrieval=protos.GoogleSearchRetrieval()) else: - raise ValueError("The only string that can be passed as a tool is 'code_execution'.") + raise ValueError( + "The only string that can be passed as a tool is 'code_execution', or one of the specified values for the `mode` parameter for google_search_retrieval." 
+ ) elif isinstance(tool, protos.CodeExecution): return Tool(code_execution=tool) + elif isinstance(tool, protos.GoogleSearchRetrieval): + return Tool(google_search_retrieval=tool) elif isinstance(tool, Iterable): return Tool(function_declarations=tool) else: @@ -786,7 +876,7 @@ def to_proto(self): def _make_tools(tools: ToolsType) -> list[Tool]: if isinstance(tools, str): - if tools.lower() == "code_execution": + if tools.lower() == "code_execution" or tools.lower() == "google_search_retrieval": return [_make_tool(tools)] else: raise ValueError("The only string that can be passed as a tool is 'code_execution'.") diff --git a/tests/test_content.py b/tests/test_content.py index 52e78f349..2031e40ae 100644 --- a/tests/test_content.py +++ b/tests/test_content.py @@ -435,12 +435,78 @@ def no_args(): ["empty_dictionary_list", [{"code_execution": {}}]], ) def test_code_execution(self, tools): - if isinstance(tools, Iterable): - t = content_types._make_tools(tools) - self.assertIsInstance(t[0].code_execution, protos.CodeExecution) - else: - t = content_types._make_tool(tools) # Pass code execution into tools - self.assertIsInstance(t.code_execution, protos.CodeExecution) + t = content_types._make_tools(tools) + self.assertIsInstance(t[0].code_execution, protos.CodeExecution) + + @parameterized.named_parameters( + ["string", "google_search_retrieval"], + ["empty_dictionary", {"google_search_retrieval": {}}], + [ + "empty_dictionary_with_dynamic_retrieval_config", + {"google_search_retrieval": {"dynamic_retrieval_config": {}}}, + ], + [ + "dictionary_with_mode_integer", + {"google_search_retrieval": {"dynamic_retrieval_config": {"mode": 0}}}, + ], + [ + "dictionary_with_mode_string", + {"google_search_retrieval": {"dynamic_retrieval_config": {"mode": "DYNAMIC"}}}, + ], + [ + "dictionary_with_dynamic_retrieval_config", + { + "google_search_retrieval": { + "dynamic_retrieval_config": {"mode": "unspecified", "dynamic_threshold": 0.5} + } + }, + ], + [ + "proto_object", + 
protos.GoogleSearchRetrieval( + dynamic_retrieval_config=protos.DynamicRetrievalConfig( + mode="MODE_UNSPECIFIED", dynamic_threshold=0.5 + ) + ), + ], + [ + "proto_passed_in", + protos.Tool( + google_search_retrieval=protos.GoogleSearchRetrieval( + dynamic_retrieval_config=protos.DynamicRetrievalConfig( + mode="MODE_UNSPECIFIED", dynamic_threshold=0.5 + ) + ) + ), + ], + [ + "proto_object_list", + [ + protos.GoogleSearchRetrieval( + dynamic_retrieval_config=protos.DynamicRetrievalConfig( + mode="MODE_UNSPECIFIED", dynamic_threshold=0.5 + ) + ) + ], + ], + [ + "proto_passed_in_list", + [ + protos.Tool( + google_search_retrieval=protos.GoogleSearchRetrieval( + dynamic_retrieval_config=protos.DynamicRetrievalConfig( + mode="MODE_UNSPECIFIED", dynamic_threshold=0.5 + ) + ) + ) + ], + ], + ) + def test_search_grounding(self, tools): + if self._testMethodName == "test_search_grounding_empty_dictionary": + pass + t = content_types._make_tools(tools) + self.assertIsInstance(t[0].google_search_retrieval, protos.GoogleSearchRetrieval) def test_two_fun_is_one_tool(self): def a(): From be00c19a0f000703687f30b43e2f26adde427896 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 26 Sep 2024 10:36:13 -0700 Subject: [PATCH 56/90] Remove finish_message (#571) Change-Id: I55f8339aea7e71e0b073c1a2ef9b3748500e09ed --- google/generativeai/types/generation_types.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index 8bd0a7736..8602d69a4 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -473,8 +473,6 @@ def text(self): "`Part`, but none were returned. The candidate's " f"[finish_reason](https://ai.google.dev/api/generate-content#finishreason) is {fr}." ) - if candidate.finish_message: - msg += 'The `finish_message` is "{candidate.finish_message}".' 
if fr is FinishReason.FINISH_REASON_UNSPECIFIED: raise ValueError(msg) From b50341ef0d721a59bde8ec7161b8ef276f602d73 Mon Sep 17 00:00:00 2001 From: Hamza NABIL <58815945+Hamza-nabil@users.noreply.github.com> Date: Fri, 27 Sep 2024 23:40:14 +0100 Subject: [PATCH 57/90] Stream file upload (#556) * Add IOBase support to FileServiceClient.create_file * add support to upload file-like object * Give clear errors for 'unknown' mime-types Change-Id: Iea071c396c4cfe2b2c8eacae74dd8fb0acbc128f * Remove duplicate check for mime_type * Add a test uploading from a file-like IO object Change-Id: I572f02ed98b9ca45299b76e7a01695fdcf917e1e * fix type check Change-Id: I220c05eee73ae76ced25254d67332f70a4069f7e --------- Co-authored-by: Mark Daoust --- google/generativeai/client.py | 15 +++++++++++---- google/generativeai/files.py | 30 ++++++++++++++++++++++-------- samples/files.py | 12 ++++++++++++ tests/test_files.py | 6 ++++-- 4 files changed, 49 insertions(+), 14 deletions(-) diff --git a/google/generativeai/client.py b/google/generativeai/client.py index 938018b96..d2eb6b1c9 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -8,6 +8,7 @@ from typing import Any, cast from collections.abc import Sequence import httplib2 +from io import IOBase import google.ai.generativelanguage as glm import google.generativeai.protos as protos @@ -88,7 +89,7 @@ def _setup_discovery_api(self, metadata: dict | Sequence[tuple[str, str]] = ()): def create_file( self, - path: str | pathlib.Path | os.PathLike, + path: str | pathlib.Path | os.PathLike | IOBase, *, mime_type: str | None = None, name: str | None = None, @@ -105,9 +106,15 @@ def create_file( if display_name is not None: file["displayName"] = display_name - media = googleapiclient.http.MediaFileUpload( - filename=path, mimetype=mime_type, resumable=resumable - ) + if isinstance(path, IOBase): + media = googleapiclient.http.MediaIoBaseUpload( + fd=path, mimetype=mime_type, resumable=resumable + ) + else: + media 
= googleapiclient.http.MediaFileUpload( + filename=path, mimetype=mime_type, resumable=resumable + ) + request = self._discovery_api.media().upload(body={"file": file}, media_body=media) for key, value in metadata: request.headers[key] = value diff --git a/google/generativeai/files.py b/google/generativeai/files.py index c0d8e1e0a..b2581bdcd 100644 --- a/google/generativeai/files.py +++ b/google/generativeai/files.py @@ -21,6 +21,7 @@ import logging from google.generativeai import protos from itertools import islice +from io import IOBase from google.generativeai.types import file_types @@ -32,7 +33,7 @@ def upload_file( - path: str | pathlib.Path | os.PathLike, + path: str | pathlib.Path | os.PathLike | IOBase, *, mime_type: str | None = None, name: str | None = None, @@ -42,7 +43,7 @@ def upload_file( """Calls the API to upload a file using a supported file service. Args: - path: The path to the file to be uploaded. + path: The path to the file or a file-like object (e.g., BytesIO) to be uploaded. mime_type: The MIME type of the file. If not provided, it will be inferred from the file extension. name: The name of the file in the destination (e.g., 'files/sample-image'). 
@@ -57,17 +58,30 @@ def upload_file( """ client = get_default_file_client() - path = pathlib.Path(os.fspath(path)) + if isinstance(path, IOBase): + if mime_type is None: + raise ValueError( + "Unknown mime type: When passing a file like object to `path` (instead of a\n" + " path-like object) you must set the `mime_type` argument" + ) + else: + path = pathlib.Path(os.fspath(path)) - if mime_type is None: - mime_type, _ = mimetypes.guess_type(path) + if display_name is None: + display_name = path.name + + if mime_type is None: + mime_type, _ = mimetypes.guess_type(path) + + if mime_type is None: + raise ValueError( + "Unknown mime type: Could not determine the mimetype for your file\n" + " please set the `mime_type` argument" + ) if name is not None and "/" not in name: name = f"files/{name}" - if display_name is None: - display_name = path.name - response = client.create_file( path=path, mime_type=mime_type, name=name, display_name=display_name, resumable=resumable ) diff --git a/samples/files.py b/samples/files.py index cbed68a1e..8f98365aa 100644 --- a/samples/files.py +++ b/samples/files.py @@ -83,6 +83,18 @@ def test_files_create_pdf(self): print(response.text) # [END files_create_pdf] + def test_files_create_from_IO(self): + # [START files_create_io] + # You can pass a file-like object, instead of a path. + # Useful for streaming. 
+ model = genai.GenerativeModel("gemini-1.5-flash") + fpath = media / "test.pdf" + with open(fpath, "rb") as f: + sample_pdf = genai.upload_file(f, mime_type="application/pdf") + response = model.generate_content(["Give me a summary of this pdf file.", sample_pdf]) + print(response.text) + # [END files_create_io] + def test_files_list(self): # [START files_list] print("My files:") diff --git a/tests/test_files.py b/tests/test_files.py index cb48316bd..0f7ca5707 100644 --- a/tests/test_files.py +++ b/tests/test_files.py @@ -18,6 +18,7 @@ import collections import datetime +import io import os from typing import Iterable, Sequence import pathlib @@ -38,7 +39,7 @@ def __init__(self, test): def create_file( self, - path: str | pathlib.Path | os.PathLike, + path: str | io.IOBase | os.PathLike, *, mime_type: str | None = None, name: str | None = None, @@ -102,12 +103,13 @@ def test_video_metadata(self): protos.File( uri="https://test", state="ACTIVE", + mime_type="video/quicktime", video_metadata=dict(video_duration=datetime.timedelta(seconds=30)), error=dict(code=7, message="ok?"), ) ) - f = genai.upload_file(path="dummy") + f = genai.upload_file(path="dummy.mov") self.assertEqual(google.rpc.status_pb2.Status(code=7, message="ok?"), f.error) self.assertEqual( protos.VideoMetadata(dict(video_duration=datetime.timedelta(seconds=30))), From 4354af6fb8261e47227e7b7665bb2a6ca7ac067f Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 30 Sep 2024 09:17:44 -0700 Subject: [PATCH 58/90] Update default model name (#576) Change-Id: I2d8ef6d251d0128695e45df119c31d6b63e26ba7 --- google/generativeai/generative_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/generativeai/generative_models.py b/google/generativeai/generative_models.py index 50b15261a..134430b2e 100644 --- a/google/generativeai/generative_models.py +++ b/google/generativeai/generative_models.py @@ -72,7 +72,7 @@ class GenerativeModel: def __init__( self, - model_name: str = 
"gemini-pro", + model_name: str = "gemini-1.5-flash-002", safety_settings: safety_types.SafetySettingOptions | None = None, generation_config: generation_types.GenerationConfigType | None = None, tools: content_types.FunctionLibraryType | None = None, From 8f77cc6ac99937cd3a81299ecf79608b91b06bbb Mon Sep 17 00:00:00 2001 From: Shilpa Kancharla Date: Tue, 1 Oct 2024 13:24:26 -0700 Subject: [PATCH 59/90] Removing OAuth from tuning for REST (#579) * Removing OAuth from tuning for REST * remove key * Fix line continuation Change-Id: I639aa453699766bf31f7722a59f4157a53cf7de4 --------- Co-authored-by: Mark Daoust --- samples/rest/tuned_models.sh | 40 +++++++++++------------------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh index 1e105377e..9b652febd 100644 --- a/samples/rest/tuned_models.sh +++ b/samples/rest/tuned_models.sh @@ -1,14 +1,9 @@ set -eu -access_token=$(gcloud auth application-default print-access-token) - - echo "[START tuned_models_create]" # [START tuned_models_create] -curl -X POST https://generativelanguage.googleapis.com/v1beta/tunedModels \ +curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" \ -d ' { "display_name": "number generator model", @@ -82,10 +77,9 @@ tuning_done=false while [[ "$tuning_done" != "true" ]]; do sleep 5 - curl -X GET https://generativelanguage.googleapis.com/v1/${operation} \ + curl -X GET "https://generativelanguage.googleapis.com/v1/${operation}?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" 2> /dev/null > tuning_operation.json + 2> /dev/null > tuning_operation.json complete=$(jq .metadata.completedPercent < tuning_operation.json) tput cuu1 @@ -96,10 +90,8 @@ done # Or 
get the TunedModel and check it's state. The model is ready to use if the state is active. modelname=$(cat tunemodel.json | jq ".metadata.tunedModel" | tr -d '"') -curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname} \ - -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" > tuned_model.json +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' > tuned_model.json cat tuned_model.json | jq ".state" # [END tuned_models_create] @@ -107,10 +99,8 @@ cat tuned_model.json | jq ".state" echo "[START tuned_models_generate_content]" # [START tuned_models_generate_content] -curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generateContent \ +curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generateContent?key=$GOOGLE_API_KEY \ -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" \ -d '{ "contents": [{ "parts": [{ @@ -122,10 +112,8 @@ curl -X POST https://generativelanguage.googleapis.com/v1beta/$modelname:generat echo "[START tuned_models_get]" # [START tuned_models_get] -curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname} \ - -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" | grep state +curl -X GET https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' | grep state # [END tuned_models_get] echo "[START tuned_models_list]" @@ -142,18 +130,14 @@ jq .tunedModels[].name < tuned_models.json page_token=$(jq .nextPageToken < tuned_models.json | tr -d '"') if [[ "$page_token" != "null"" ]]; then -curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5\&page_token=${page_token} \ - -H "Content-Type: 
application/json" \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" > tuned_models2.json +curl -X GET https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5\&page_token=${page_token}?key=$GOOGLE_API_KEY \ + -H "Content-Type: application/json" > tuned_models2.json jq .tunedModels[].name < tuned_models.json fi # [END tuned_models_list] echo "[START tuned_models_delete]" # [START tuned_models_delete] -curl -X DELETE https://generativelanguage.googleapis.com/v1beta/${modelname} \ - -H 'Content-Type: application/json' \ - -H "Authorization: Bearer ${access_token}" \ - -H "x-goog-user-project: ${project_id}" +curl -X DELETE https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GOOGLE_API_KEY \ + -H 'Content-Type: application/json' # [END tuned_models_delete] \ No newline at end of file From 7546026dcad2bed72b181845ed93451bbefd2120 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Mon, 7 Oct 2024 08:34:21 -0700 Subject: [PATCH 60/90] update version (#588) Change-Id: I998fa990bc7dfb220e78cd7f9692c8530993d7c4 --- google/generativeai/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/google/generativeai/version.py b/google/generativeai/version.py index f6bf29d13..c612b5034 100644 --- a/google/generativeai/version.py +++ b/google/generativeai/version.py @@ -14,4 +14,4 @@ # limitations under the License. from __future__ import annotations -__version__ = "0.8.2" +__version__ = "0.8.3" From c8eadc4ab76eb6140b2702614bf081fd65804280 Mon Sep 17 00:00:00 2001 From: Na'aman Hirschfeld Date: Wed, 9 Oct 2024 01:23:33 +0200 Subject: [PATCH 61/90] feat: add py.typed (#509) * Update setup.py * Create py.typed * Update setup.py to trigger CI. 
--------- Co-authored-by: Mark Daoust --- google/generativeai/py.typed | 1 + setup.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 google/generativeai/py.typed diff --git a/google/generativeai/py.typed b/google/generativeai/py.typed new file mode 100644 index 000000000..d57989efb --- /dev/null +++ b/google/generativeai/py.typed @@ -0,0 +1 @@ +# see: https://peps.python.org/pep-0561/ diff --git a/setup.py b/setup.py index 7aac23e9c..c61393765 100644 --- a/setup.py +++ b/setup.py @@ -3,7 +3,7 @@ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. -# You may obtain a copy of the License at +# You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # @@ -88,6 +88,7 @@ def get_version(): "Programming Language :: Python :: 3.12", "Operating System :: OS Independent", "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Typing :: Typed", ], platforms="Posix; MacOS X; Windows", packages=packages, @@ -97,4 +98,5 @@ def get_version(): extras_require=extras_require, include_package_data=True, zip_safe=False, + package_data={"generativeai": ["py.typed"]}, ) From e9b0cdefb66bb4efa8bccef4809b7c8bd7d578b2 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 15 Oct 2024 18:13:20 -0700 Subject: [PATCH 62/90] Remove references to Pro 1.0 (#600) * remove references to 1.0-pro Change-Id: I405c87d495c73550cfbd00a13249cb1e30ab0989 * remove references to gemini-pro Change-Id: Ied2f0b7112dd5d61390da3e84457a2fb3f770665 * Update models.py * format Change-Id: Ib3a0c90bfc6ec7f8f793917b3140769e2635a8e9 --- docs/api/google/generativeai/ChatSession.md | 4 +- .../google/generativeai/GenerativeModel.md | 8 +-- docs/api/google/generativeai/get_model.md | 2 +- google/generativeai/generative_models.py | 10 ++-- google/generativeai/models.py | 4 +- google/generativeai/notebook/text_model.py | 2 +- samples/rest/tuned_models.sh | 2 +- 
tests/test_generative_models.py | 50 ++++++++++--------- tests/test_generative_models_async.py | 6 +-- 9 files changed, 46 insertions(+), 42 deletions(-) diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md index 3898a2ef1..ac58e2e4b 100644 --- a/docs/api/google/generativeai/ChatSession.md +++ b/docs/api/google/generativeai/ChatSession.md @@ -39,7 +39,7 @@ Contains an ongoing conversation with the model. ``` ->>> model = genai.GenerativeModel('models/gemini-pro') +>>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> chat = model.start_chat() >>> response = chat.send_message("Hello") >>> print(response.text) @@ -136,7 +136,7 @@ Sends the conversation history with the added message and returns the model's re Appends the request and response to the conversation history. ``` ->>> model = genai.GenerativeModel('models/gemini-pro') +>>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> chat = model.start_chat() >>> response = chat.send_message("Hello") >>> print(response.text) diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md index 9b9e7ff6f..71f293ebe 100644 --- a/docs/api/google/generativeai/GenerativeModel.md +++ b/docs/api/google/generativeai/GenerativeModel.md @@ -31,7 +31,7 @@ The `genai.GenerativeModel` class wraps default parameters for calls to google.generativeai.GenerativeModel( - model_name: str = 'gemini-pro', + model_name: str = 'gemini-1.5-flash', safety_settings: (safety_types.SafetySettingOptions | None) = None, generation_config: (generation_types.GenerationConfigType | None) = None, tools: (content_types.FunctionLibraryType | None) = None, @@ -51,7 +51,7 @@ requests. 
What media-types are supported for input and output is model-dependant >>> import google.generativeai as genai >>> import PIL.Image >>> genai.configure(api_key='YOUR_API_KEY') ->>> model = genai.GenerativeModel('models/gemini-pro') +>>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> result = model.generate_content('Tell me a story about a magic backpack') >>> result.text "In the quaint little town of Lakeside, there lived a young girl named Lily..." @@ -62,7 +62,7 @@ requests. What media-types are supported for input and output is model-dependant ``` ->>> model = genai.GenerativeModel('models/gemini-pro') +>>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> result = model.generate_content([ ... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')]) >>> result.text @@ -270,7 +270,7 @@ This >> model = genai.GenerativeModel('models/gemini-pro') +>>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> response = model.generate_content('Tell me a story about a magic backpack') >>> response.text ``` diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md index e488dbfaa..40e5b0e46 100644 --- a/docs/api/google/generativeai/get_model.md +++ b/docs/api/google/generativeai/get_model.md @@ -38,7 +38,7 @@ Calls the API to fetch a model by name. 
``` import pprint -model = genai.get_model('models/gemini-pro') +model = genai.get_model('models/gemini-1.5-flash') pprint.pprint(model) ``` diff --git a/google/generativeai/generative_models.py b/google/generativeai/generative_models.py index 134430b2e..8d331a9f6 100644 --- a/google/generativeai/generative_models.py +++ b/google/generativeai/generative_models.py @@ -36,14 +36,14 @@ class GenerativeModel: >>> import google.generativeai as genai >>> import PIL.Image >>> genai.configure(api_key='YOUR_API_KEY') - >>> model = genai.GenerativeModel('models/gemini-pro') + >>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> result = model.generate_content('Tell me a story about a magic backpack') >>> result.text "In the quaint little town of Lakeside, there lived a young girl named Lily..." Multimodal input: - >>> model = genai.GenerativeModel('models/gemini-pro') + >>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> result = model.generate_content([ ... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')]) >>> result.text @@ -250,7 +250,7 @@ def generate_content( This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn conversations. - >>> model = genai.GenerativeModel('models/gemini-pro') + >>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> response = model.generate_content('Tell me a story about a magic backpack') >>> response.text @@ -481,7 +481,7 @@ def start_chat( class ChatSession: """Contains an ongoing conversation with the model. - >>> model = genai.GenerativeModel('models/gemini-pro') + >>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> chat = model.start_chat() >>> response = chat.send_message("Hello") >>> print(response.text) @@ -524,7 +524,7 @@ def send_message( Appends the request and response to the conversation history. 
- >>> model = genai.GenerativeModel('models/gemini-pro') + >>> model = genai.GenerativeModel('models/gemini-1.5-flash') >>> chat = model.start_chat() >>> response = chat.send_message("Hello") >>> print(response.text) diff --git a/google/generativeai/models.py b/google/generativeai/models.py index 4b95a2470..b23a7ce88 100644 --- a/google/generativeai/models.py +++ b/google/generativeai/models.py @@ -40,7 +40,7 @@ def get_model( ``` import pprint - model = genai.get_model('models/gemini-pro') + model = genai.get_model('models/gemini-1.5-flash') pprint.pprint(model) ``` @@ -112,7 +112,7 @@ def get_tuned_model( ``` import pprint - model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001') + model = genai.get_tuned_model('tunedModels/gemini-1.5-flash') pprint.pprint(model) ``` diff --git a/google/generativeai/notebook/text_model.py b/google/generativeai/notebook/text_model.py index 38375e348..7360bbfbd 100644 --- a/google/generativeai/notebook/text_model.py +++ b/google/generativeai/notebook/text_model.py @@ -20,7 +20,7 @@ from google.generativeai.types import generation_types from google.generativeai.notebook.lib import model as model_lib -_DEFAULT_MODEL = "models/gemini-pro" +_DEFAULT_MODEL = "models/gemini-1.5-flash" class TextModel(model_lib.AbstractModel): diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh index 9b652febd..5594734f6 100644 --- a/samples/rest/tuned_models.sh +++ b/samples/rest/tuned_models.sh @@ -7,7 +7,7 @@ curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$ -d ' { "display_name": "number generator model", - "base_model": "models/gemini-1.0-pro-001", + "base_model": "models/gemini-1.5-flash-001-tuning", "tuning_task": { "hyperparameters": { "batch_size": 2, diff --git a/tests/test_generative_models.py b/tests/test_generative_models.py index fa69099ba..74469e5b8 100644 --- a/tests/test_generative_models.py +++ b/tests/test_generative_models.py @@ -115,7 +115,7 @@ def setUp(self): def 
test_hello(self): # Generate text from text prompt - model = generative_models.GenerativeModel(model_name="gemini-pro") + model = generative_models.GenerativeModel(model_name="gemini-1.5-flash") self.responses["generate_content"].append(simple_response("world!")) @@ -138,7 +138,7 @@ def test_hello(self): ) def test_image(self, content): # Generate text from image - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") cat = "It's a cat" self.responses["generate_content"].append(simple_response(cat)) @@ -172,7 +172,7 @@ def test_image(self, content): ) def test_generation_config_overwrite(self, config1, config2): # Generation config - model = generative_models.GenerativeModel("gemini-pro", generation_config=config1) + model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1) self.responses["generate_content"] = [ simple_response(" world!"), @@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2): ) def test_safety_overwrite(self, safe1, safe2): # Safety - model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1) + model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1) self.responses["generate_content"] = [ simple_response(" world!"), @@ -253,7 +253,7 @@ def test_stream_basic(self): chunks = ["first", " second", " third"] self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Hello", stream=True) self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello") @@ -267,7 +267,7 @@ def test_stream_lookahead(self): chunks = ["first", " second", " third"] self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)] - model = generative_models.GenerativeModel("gemini-pro") 
+ model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Hello", stream=True) self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello") @@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self): ] self.responses["stream_generate_content"] = [(chunk for chunk in chunks)] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Bad stuff!", stream=True) self.assertEqual( @@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self): ] self.responses["stream_generate_content"] = [(chunk for chunk in chunks)] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Hello", stream=True) self.assertEqual( @@ -389,7 +389,7 @@ def add(a: int, b: int) -> int: def test_chat(self): # Multi turn chat - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() self.responses["generate_content"] = [ @@ -423,7 +423,7 @@ def test_chat(self): def test_chat_roles(self): self.responses["generate_content"] = [simple_response("hello!")] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() response = chat.send_message("hello?") history = chat.history @@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config): ) self.responses["generate_content"] = [simple_response("echo echo")] - model = generative_models.GenerativeModel("gemini-pro", tools=tools) + model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools) _ = model.generate_content("Hello", tools=[tools], tool_config=tool_config) req = self.observed_requests[0] @@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config): 
) def test_system_instruction(self, instruction, expected_instr): self.responses["generate_content"] = [simple_response("echo echo")] - model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction) + model = generative_models.GenerativeModel( + "gemini-1.5-flash", system_instruction=instruction + ) _ = model.generate_content("test") @@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs): ) def test_repr_for_unary_non_streamed_response(self): - model = generative_models.GenerativeModel(model_name="gemini-pro") + model = generative_models.GenerativeModel(model_name="gemini-1.5-flash") self.responses["generate_content"].append(simple_response("world!")) response = model.generate_content("Hello") @@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self): chunks = ["first", " second", " third"] self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Hello", stream=True) iterator = iter(response) @@ -980,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self): ] self.responses["stream_generate_content"] = [(chunk for chunk in chunks)] - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") response = model.generate_content("Bad stuff!", stream=True) result = repr(response) @@ -1096,7 +1098,7 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self): def test_repr_for_multi_turn_chat(self): # Multi turn chat - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() self.responses["generate_content"] = [ @@ -1119,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self): """\ ChatSession( model=genai.GenerativeModel( - model_name='models/gemini-pro', + 
model_name='models/gemini-1.5-flash', generation_config={}, safety_settings={}, tools=None, @@ -1133,7 +1135,7 @@ def test_repr_for_multi_turn_chat(self): def test_repr_for_incomplete_streaming_chat(self): # Multi turn chat - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() self.responses["stream_generate_content"] = [ @@ -1148,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self): """\ ChatSession( model=genai.GenerativeModel( - model_name='models/gemini-pro', + model_name='models/gemini-1.5-flash', generation_config={}, safety_settings={}, tools=None, @@ -1162,7 +1164,7 @@ def test_repr_for_incomplete_streaming_chat(self): def test_repr_for_broken_streaming_chat(self): # Multi turn chat - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() self.responses["stream_generate_content"] = [ @@ -1193,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self): """\ ChatSession( model=genai.GenerativeModel( - model_name='models/gemini-pro', + model_name='models/gemini-1.5-flash', generation_config={}, safety_settings={}, tools=None, @@ -1206,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self): self.assertEqual(expected, result) def test_repr_for_system_instruction(self): - model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.") + model = generative_models.GenerativeModel( + "gemini-1.5-flash", system_instruction="Be excellent." 
+ ) result = repr(model) self.assertIn("system_instruction='Be excellent.'", result) @@ -1237,7 +1241,7 @@ def test_chat_with_request_options(self): ) request_options = {"timeout": 120} - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel("gemini-1.5-flash") chat = model.start_chat() chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options)) diff --git a/tests/test_generative_models_async.py b/tests/test_generative_models_async.py index dd9bc3b62..b37c65235 100644 --- a/tests/test_generative_models_async.py +++ b/tests/test_generative_models_async.py @@ -80,7 +80,7 @@ async def count_tokens( async def test_basic(self): # Generate text from text prompt - model = generative_models.GenerativeModel(model_name="gemini-pro") + model = generative_models.GenerativeModel(model_name="gemini-1.5-flash") self.responses["generate_content"] = [simple_response("world!")] @@ -93,7 +93,7 @@ async def test_basic(self): async def test_streaming(self): # Generate text from text prompt - model = generative_models.GenerativeModel(model_name="gemini-pro") + model = generative_models.GenerativeModel(model_name="gemini-1.5-flash") async def responses(): for c in "world!": @@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config): ) self.responses["generate_content"] = [simple_response("echo echo")] - model = generative_models.GenerativeModel("gemini-pro", tools=tools) + model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools) _ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config) req = self.observed_requests[0] From 3d91916d2ffae84d8559b943bea3649e0ae8eb45 Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Thu, 24 Oct 2024 10:45:31 -0700 Subject: [PATCH 63/90] Fix GenerationConfig to match the API. 
(#609) * Fix GenerationConfig, this doesn't match the API Change-Id: I4e0c3b45125023e056c3d7e1549eafb47e42815a * format Change-Id: I161dd7b9b371e005b4385faeaa84f6d141e09b43 --- google/generativeai/types/generation_types.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py index 8602d69a4..45ca6b21b 100644 --- a/google/generativeai/types/generation_types.py +++ b/google/generativeai/types/generation_types.py @@ -85,6 +85,8 @@ class GenerationConfigDict(TypedDict, total=False): temperature: float response_mime_type: str response_schema: protos.Schema | Mapping[str, Any] # fmt: off + presence_penalty: float + frequency_penalty: float @dataclasses.dataclass @@ -144,8 +146,6 @@ class GenerationConfig: Note: The default value varies by model, see the `Model.top_k` attribute of the `Model` returned the `genai.get_model` function. - seed: - Optional. Seed used in decoding. If not set, the request uses a randomly generated seed. response_mime_type: Optional. Output response mimetype of the generated candidate text. @@ -161,10 +161,6 @@ class GenerationConfig: Optional. frequency_penalty: Optional. - response_logprobs: - Optional. If true, export the `logprobs` results in response. - logprobs: - Optional. Number of candidates of log probabilities to return at each step of decoding. 
""" candidate_count: int | None = None @@ -173,13 +169,10 @@ class GenerationConfig: temperature: float | None = None top_p: float | None = None top_k: int | None = None - seed: int | None = None response_mime_type: str | None = None response_schema: protos.Schema | Mapping[str, Any] | type | None = None presence_penalty: float | None = None frequency_penalty: float | None = None - response_logprobs: bool | None = None - logprobs: int | None = None GenerationConfigType = Union[protos.GenerationConfig, GenerationConfigDict, GenerationConfig] From db8221af3ebe86dfec32a0f012beeda8659a6656 Mon Sep 17 00:00:00 2001 From: Sumeet213 <75944652+Sumeet213@users.noreply.github.com> Date: Thu, 24 Oct 2024 23:35:03 +0530 Subject: [PATCH 64/90] fix: thread-safe discovery API setup (Issues#327) (#583) --- google/generativeai/client.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/google/generativeai/client.py b/google/generativeai/client.py index d2eb6b1c9..c9c5c8c5b 100644 --- a/google/generativeai/client.py +++ b/google/generativeai/client.py @@ -5,6 +5,7 @@ import inspect import dataclasses import pathlib +import threading from typing import Any, cast from collections.abc import Sequence import httplib2 @@ -64,6 +65,7 @@ def patch_colab_gce_credentials(): class FileServiceClient(glm.FileServiceClient): def __init__(self, *args, **kwargs): self._discovery_api = None + self._local = threading.local() super().__init__(*args, **kwargs) def _setup_discovery_api(self, metadata: dict | Sequence[tuple[str, str]] = ()): @@ -83,7 +85,7 @@ def _setup_discovery_api(self, metadata: dict | Sequence[tuple[str, str]] = ()): request.http.close() discovery_doc = content.decode("utf-8") - self._discovery_api = googleapiclient.discovery.build_from_document( + self._local.discovery_api = googleapiclient.discovery.build_from_document( discovery_doc, developerKey=api_key ) @@ -115,7 +117,7 @@ def create_file( filename=path, mimetype=mime_type, resumable=resumable ) - 
request = self._discovery_api.media().upload(body={"file": file}, media_body=media) + request = self._local.discovery_api.media().upload(body={"file": file}, media_body=media) for key, value in metadata: request.headers[key] = value result = request.execute() From c16df61f883a5393950b547e9b40e69a51c0154a Mon Sep 17 00:00:00 2001 From: Weston Ruter Date: Tue, 29 Oct 2024 15:08:24 -0700 Subject: [PATCH 65/90] Use double quotes for JSON object keys (#612) --- samples/rest/code_execution.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh index 73c297777..44fbf679c 100644 --- a/samples/rest/code_execution.sh +++ b/samples/rest/code_execution.sh @@ -4,7 +4,7 @@ echo "[START code_execution_basic]" # [START code_execution_basic] curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ --d ' {"tools": [{'code_execution': {}}], +-d ' {"tools": [{"code_execution": {}}], "contents": { "parts": { @@ -18,7 +18,7 @@ echo "[START code_execution_chat]" # [START code_execution_chat] curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \ -H 'Content-Type: application/json' \ --d '{"tools": [{'code_execution': {}}], +-d '{"tools": [{"code_execution": {}}], "contents": [ { "role": "user", From 0e5c5f25fe4ce266791fa2afb20d17dee780ca9e Mon Sep 17 00:00:00 2001 From: Mark Daoust Date: Tue, 29 Oct 2024 16:30:21 -0700 Subject: [PATCH 66/90] Add whitespace to generated tables to fix markdown in tables. 
(#602) * fixes 563 Change-Id: Ifb9743822c744845aac4deea9963a0c6e6389362 * Refresh docs for v0.8.3 Change-Id: I7536e809b9a1b8f26fc970a42c83000b0d607793 * remove align-left Change-Id: I0462300f198e3de020cbfc032e575e094b88d360 * format Change-Id: If83605c320d2b7255618737c2cd985a153584343 * refresh Change-Id: I0776467ac6471b2f8f06f089a54e447d149b7d78 * remove devsite headers Change-Id: I2d10299b5cffc2e353b71dacfabb3a7e9f22a9fd * remove devsite headers Change-Id: I1b9ca5bf2d46dbd39b8cb226d153107851e2e3f0 --- docs/api/google/generativeai.md | 34 +- docs/api/google/generativeai/ChatSession.md | 44 +- .../google/generativeai/GenerativeModel.md | 79 +- docs/api/google/generativeai/_api_cache.json | 2779 ++++++++++------- docs/api/google/generativeai/_toc.yaml | 74 +- docs/api/google/generativeai/all_symbols.md | 36 +- docs/api/google/generativeai/api_report.pb | Bin 49595 -> 54075 bytes docs/api/google/generativeai/caching.md | 49 + .../generativeai/caching/CachedContent.md | 448 +++ .../caching/get_default_cache_client.md | 26 + docs/api/google/generativeai/chat.md | 198 -- docs/api/google/generativeai/chat_async.md | 198 -- docs/api/google/generativeai/configure.md | 22 +- .../generativeai/count_message_tokens.md | 41 - .../google/generativeai/count_text_tokens.md | 37 - .../google/generativeai/create_tuned_model.md | 62 +- docs/api/google/generativeai/delete_file.md | 10 +- .../google/generativeai/delete_tuned_model.md | 8 +- docs/api/google/generativeai/embed_content.md | 34 +- .../generativeai/embed_content_async.md | 8 +- .../generativeai/generate_embeddings.md | 90 - docs/api/google/generativeai/generate_text.md | 172 - .../api/google/generativeai/get_base_model.md | 22 +- docs/api/google/generativeai/get_file.md | 10 +- docs/api/google/generativeai/get_model.md | 22 +- docs/api/google/generativeai/get_operation.md | 8 +- .../google/generativeai/get_tuned_model.md | 22 +- docs/api/google/generativeai/list_files.md | 10 +- docs/api/google/generativeai/list_models.md | 
22 +- .../google/generativeai/list_operations.md | 8 +- .../google/generativeai/list_tuned_models.md | 22 +- docs/api/google/generativeai/protos.md | 34 +- .../protos/AttributionSourceId.md | 20 +- .../AttributionSourceId/GroundingPassageId.md | 18 +- .../SemanticRetrieverChunk.md | 18 +- .../protos/BatchCreateChunksRequest.md | 16 +- .../protos/BatchCreateChunksResponse.md | 12 +- .../protos/BatchDeleteChunksRequest.md | 16 +- .../protos/BatchEmbedContentsRequest.md | 18 +- .../protos/BatchEmbedContentsResponse.md | 14 +- .../protos/BatchEmbedTextRequest.md | 20 +- .../protos/BatchEmbedTextResponse.md | 12 +- .../protos/BatchUpdateChunksRequest.md | 16 +- .../protos/BatchUpdateChunksResponse.md | 12 +- docs/api/google/generativeai/protos/Blob.md | 18 +- .../generativeai/protos/CachedContent.md | 57 +- .../protos/CachedContent/UsageMetadata.md | 12 +- .../google/generativeai/protos/Candidate.md | 85 +- .../protos/Candidate/FinishReason.md | 271 +- docs/api/google/generativeai/protos/Chunk.md | 33 +- .../google/generativeai/protos/Chunk/State.md | 117 +- .../google/generativeai/protos/ChunkData.md | 12 +- .../generativeai/protos/CitationMetadata.md | 12 +- .../generativeai/protos/CitationSource.md | 24 +- .../generativeai/protos/CodeExecution.md | 10 +- .../protos/CodeExecutionResult.md | 19 +- .../protos/CodeExecutionResult/Outcome.md | 119 +- .../google/generativeai/protos/Condition.md | 21 +- .../generativeai/protos/Condition/Operator.md | 162 +- .../api/google/generativeai/protos/Content.md | 18 +- .../generativeai/protos/ContentEmbedding.md | 14 +- .../generativeai/protos/ContentFilter.md | 19 +- docs/api/google/generativeai/protos/Corpus.md | 24 +- .../protos/CountMessageTokensRequest.md | 16 +- .../protos/CountMessageTokensResponse.md | 12 +- .../protos/CountTextTokensRequest.md | 16 +- .../protos/CountTextTokensResponse.md | 12 +- .../generativeai/protos/CountTokensRequest.md | 35 +- .../protos/CountTokensResponse.md | 26 +- 
.../protos/CreateCachedContentRequest.md | 12 +- .../generativeai/protos/CreateChunkRequest.md | 16 +- .../protos/CreateCorpusRequest.md | 12 +- .../protos/CreateDocumentRequest.md | 16 +- .../generativeai/protos/CreateFileRequest.md | 12 +- .../generativeai/protos/CreateFileResponse.md | 12 +- .../protos/CreatePermissionRequest.md | 16 +- .../protos/CreateTunedModelMetadata.md | 30 +- .../protos/CreateTunedModelRequest.md | 20 +- .../generativeai/protos/CustomMetadata.md | 24 +- .../api/google/generativeai/protos/Dataset.md | 14 +- .../protos/DeleteCachedContentRequest.md | 12 +- .../generativeai/protos/DeleteChunkRequest.md | 12 +- .../protos/DeleteCorpusRequest.md | 16 +- .../protos/DeleteDocumentRequest.md | 16 +- .../generativeai/protos/DeleteFileRequest.md | 12 +- .../protos/DeletePermissionRequest.md | 12 +- .../protos/DeleteTunedModelRequest.md | 14 +- .../google/generativeai/protos/Document.md | 28 +- .../protos/DynamicRetrievalConfig.md | 66 + .../protos/DynamicRetrievalConfig/Mode.md | 651 ++++ .../protos/EmbedContentRequest.md | 34 +- .../protos/EmbedContentResponse.md | 14 +- .../generativeai/protos/EmbedTextRequest.md | 16 +- .../generativeai/protos/EmbedTextResponse.md | 12 +- .../google/generativeai/protos/Embedding.md | 12 +- .../api/google/generativeai/protos/Example.md | 16 +- .../generativeai/protos/ExecutableCode.md | 19 +- .../protos/ExecutableCode/Language.md | 101 +- docs/api/google/generativeai/protos/File.md | 57 +- .../google/generativeai/protos/File/State.md | 117 +- .../google/generativeai/protos/FileData.md | 18 +- .../generativeai/protos/FunctionCall.md | 18 +- .../protos/FunctionCallingConfig.md | 19 +- .../protos/FunctionCallingConfig/Mode.md | 119 +- .../protos/FunctionDeclaration.md | 22 +- .../generativeai/protos/FunctionResponse.md | 18 +- .../protos/GenerateAnswerRequest.md | 60 +- .../GenerateAnswerRequest/AnswerStyle.md | 119 +- .../protos/GenerateAnswerResponse.md | 40 +- .../GenerateAnswerResponse/InputFeedback.md | 23 +- 
.../InputFeedback/BlockReason.md | 112 +- .../protos/GenerateContentRequest.md | 91 +- .../protos/GenerateContentResponse.md | 42 +- .../GenerateContentResponse/PromptFeedback.md | 22 +- .../PromptFeedback/BlockReason.md | 163 +- .../GenerateContentResponse/UsageMetadata.md | 40 +- .../protos/GenerateMessageRequest.md | 32 +- .../protos/GenerateMessageResponse.md | 20 +- .../protos/GenerateTextRequest.md | 44 +- .../protos/GenerateTextResponse.md | 20 +- .../generativeai/protos/GenerationConfig.md | 205 +- .../protos/GetCachedContentRequest.md | 12 +- .../generativeai/protos/GetChunkRequest.md | 12 +- .../generativeai/protos/GetCorpusRequest.md | 12 +- .../generativeai/protos/GetDocumentRequest.md | 12 +- .../generativeai/protos/GetFileRequest.md | 12 +- .../generativeai/protos/GetModelRequest.md | 12 +- .../protos/GetPermissionRequest.md | 12 +- .../protos/GetTunedModelRequest.md | 14 +- .../protos/GoogleSearchRetrieval.md | 47 + .../protos/GroundingAttribution.md | 18 +- .../generativeai/protos/GroundingChunk.md | 51 + .../generativeai/protos/GroundingChunk/Web.md | 61 + .../generativeai/protos/GroundingMetadata.md | 90 + .../generativeai/protos/GroundingPassage.md | 18 +- .../generativeai/protos/GroundingPassages.md | 14 +- .../generativeai/protos/GroundingSupport.md | 80 + .../generativeai/protos/HarmCategory.md | 237 +- .../generativeai/protos/Hyperparameters.md | 26 +- .../protos/ListCachedContentsRequest.md | 16 +- .../protos/ListCachedContentsResponse.md | 16 +- .../generativeai/protos/ListChunksRequest.md | 20 +- .../generativeai/protos/ListChunksResponse.md | 16 +- .../generativeai/protos/ListCorporaRequest.md | 16 +- .../protos/ListCorporaResponse.md | 16 +- .../protos/ListDocumentsRequest.md | 20 +- .../protos/ListDocumentsResponse.md | 16 +- .../generativeai/protos/ListFilesRequest.md | 16 +- .../generativeai/protos/ListFilesResponse.md | 16 +- .../generativeai/protos/ListModelsRequest.md | 25 +- .../generativeai/protos/ListModelsResponse.md | 18 +- 
.../protos/ListPermissionsRequest.md | 20 +- .../protos/ListPermissionsResponse.md | 16 +- .../protos/ListTunedModelsRequest.md | 22 +- .../protos/ListTunedModelsResponse.md | 18 +- .../generativeai/protos/LogprobsResult.md | 65 + .../protos/LogprobsResult/Candidate.md | 75 + .../protos/LogprobsResult/TopCandidates.md | 47 + .../api/google/generativeai/protos/Message.md | 20 +- .../generativeai/protos/MessagePrompt.md | 20 +- .../generativeai/protos/MetadataFilter.md | 16 +- docs/api/google/generativeai/protos/Model.md | 79 +- docs/api/google/generativeai/protos/Part.md | 38 +- .../google/generativeai/protos/Permission.md | 26 +- .../protos/Permission/GranteeType.md | 117 +- .../generativeai/protos/Permission/Role.md | 117 +- .../generativeai/protos/PredictRequest.md | 75 + .../generativeai/protos/PredictResponse.md | 46 + .../generativeai/protos/QueryCorpusRequest.md | 24 +- .../protos/QueryCorpusResponse.md | 12 +- .../protos/QueryDocumentRequest.md | 24 +- .../protos/QueryDocumentResponse.md | 12 +- .../generativeai/protos/RelevantChunk.md | 16 +- .../generativeai/protos/RetrievalMetadata.md | 52 + .../generativeai/protos/SafetyFeedback.md | 18 +- .../generativeai/protos/SafetyRating.md | 23 +- .../generativeai/protos/SafetySetting.md | 19 +- docs/api/google/generativeai/protos/Schema.md | 72 +- .../protos/Schema/PropertiesEntry.md | 26 +- .../generativeai/protos/SearchEntryPoint.md | 61 + .../api/google/generativeai/protos/Segment.md | 91 + .../protos/SemanticRetrieverConfig.md | 38 +- .../google/generativeai/protos/StringList.md | 12 +- .../google/generativeai/protos/TaskType.md | 155 +- .../generativeai/protos/TextCompletion.md | 20 +- .../google/generativeai/protos/TextPrompt.md | 12 +- docs/api/google/generativeai/protos/Tool.md | 49 +- .../google/generativeai/protos/ToolConfig.md | 14 +- .../protos/TransferOwnershipRequest.md | 16 +- .../protos/TransferOwnershipResponse.md | 8 +- .../google/generativeai/protos/TunedModel.md | 82 +- 
.../generativeai/protos/TunedModelSource.md | 20 +- .../generativeai/protos/TuningExample.md | 18 +- .../generativeai/protos/TuningExamples.md | 14 +- .../generativeai/protos/TuningSnapshot.md | 26 +- .../google/generativeai/protos/TuningTask.md | 30 +- docs/api/google/generativeai/protos/Type.md | 146 +- .../protos/UpdateCachedContentRequest.md | 16 +- .../generativeai/protos/UpdateChunkRequest.md | 16 +- .../protos/UpdateCorpusRequest.md | 16 +- .../protos/UpdateDocumentRequest.md | 16 +- .../protos/UpdatePermissionRequest.md | 16 +- .../protos/UpdateTunedModelRequest.md | 18 +- .../generativeai/protos/VideoMetadata.md | 12 +- docs/api/google/generativeai/types.md | 51 +- .../generativeai/types/AnyModelNameOptions.md | 4 - .../types/AsyncGenerateContentResponse.md | 43 +- .../google/generativeai/types/AuthorError.md | 27 - .../types/BaseModelNameOptions.md | 4 - .../api/google/generativeai/types/BlobDict.md | 14 +- .../api/google/generativeai/types/BlobType.md | 4 - .../types/BlockedPromptException.md | 10 +- .../generativeai/types/BlockedReason.md | 110 +- .../generativeai/types/BrokenResponseError.md | 10 +- .../types/CallableFunctionDeclaration.md | 35 +- .../google/generativeai/types/ChatResponse.md | 223 -- .../types/CitationMetadataDict.md | 12 +- .../generativeai/types/CitationSourceDict.md | 24 +- .../google/generativeai/types/Completion.md | 97 - .../google/generativeai/types/ContentDict.md | 14 +- .../generativeai/types/ContentFilterDict.md | 16 +- .../google/generativeai/types/ContentType.md | 4 - .../google/generativeai/types/ContentsType.md | 4 - .../google/generativeai/types/ExampleDict.md | 27 - .../generativeai/types/ExampleOptions.md | 26 - .../generativeai/types/ExamplesOptions.md | 27 - docs/api/google/generativeai/types/File.md | 58 +- .../google/generativeai/types/FileDataDict.md | 12 +- .../google/generativeai/types/FileDataType.md | 4 - .../generativeai/types/FunctionDeclaration.md | 30 +- .../types/FunctionDeclarationType.md | 4 - 
.../generativeai/types/FunctionLibrary.md | 20 +- .../generativeai/types/FunctionLibraryType.md | 7 +- .../types/GenerateContentResponse.md | 46 +- .../generativeai/types/GenerationConfig.md | 202 +- .../types/GenerationConfigDict.md | 14 +- .../types/GenerationConfigType.md | 4 - .../generativeai/types/HarmBlockThreshold.md | 152 +- .../google/generativeai/types/HarmCategory.md | 106 +- .../generativeai/types/HarmProbability.md | 128 +- .../types/IncompleteIterationError.md | 10 +- .../google/generativeai/types/MessageDict.md | 27 - .../generativeai/types/MessageOptions.md | 25 - .../generativeai/types/MessagePromptDict.md | 27 - .../types/MessagePromptOptions.md | 27 - .../generativeai/types/MessagesOptions.md | 26 - docs/api/google/generativeai/types/Model.md | 78 +- .../generativeai/types/ModelsIterable.md | 4 - .../api/google/generativeai/types/PartDict.md | 14 +- .../api/google/generativeai/types/PartType.md | 4 - .../google/generativeai/types/Permission.md | 50 +- .../google/generativeai/types/Permissions.md | 76 +- .../generativeai/types/RequestOptions.md | 34 +- .../generativeai/types/RequestOptionsType.md | 4 - .../google/generativeai/types/ResponseDict.md | 27 - .../generativeai/types/SafetyFeedbackDict.md | 16 +- .../generativeai/types/SafetyRatingDict.md | 20 +- .../generativeai/types/SafetySettingDict.md | 16 +- docs/api/google/generativeai/types/Status.md | 20 +- .../types/StopCandidateException.md | 10 +- .../generativeai/types/StrictContentType.md | 4 - docs/api/google/generativeai/types/Tool.md | 43 +- .../api/google/generativeai/types/ToolDict.md | 14 +- .../google/generativeai/types/ToolsType.md | 7 +- .../google/generativeai/types/TunedModel.md | 149 +- .../types/TunedModelNameOptions.md | 4 - .../generativeai/types/TunedModelState.md | 119 +- .../google/generativeai/types/TypedDict.md | 10 +- .../types/get_default_file_client.md | 8 +- .../google/generativeai/types/to_file_data.md | 6 +- .../google/generativeai/update_tuned_model.md | 8 +- 
docs/api/google/generativeai/upload_file.md | 38 +- docs/build_docs.py | 36 +- 273 files changed, 8604 insertions(+), 5571 deletions(-) create mode 100644 docs/api/google/generativeai/caching.md create mode 100644 docs/api/google/generativeai/caching/CachedContent.md create mode 100644 docs/api/google/generativeai/caching/get_default_cache_client.md delete mode 100644 docs/api/google/generativeai/chat.md delete mode 100644 docs/api/google/generativeai/chat_async.md delete mode 100644 docs/api/google/generativeai/count_message_tokens.md delete mode 100644 docs/api/google/generativeai/count_text_tokens.md delete mode 100644 docs/api/google/generativeai/generate_embeddings.md delete mode 100644 docs/api/google/generativeai/generate_text.md create mode 100644 docs/api/google/generativeai/protos/DynamicRetrievalConfig.md create mode 100644 docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md create mode 100644 docs/api/google/generativeai/protos/GoogleSearchRetrieval.md create mode 100644 docs/api/google/generativeai/protos/GroundingChunk.md create mode 100644 docs/api/google/generativeai/protos/GroundingChunk/Web.md create mode 100644 docs/api/google/generativeai/protos/GroundingMetadata.md create mode 100644 docs/api/google/generativeai/protos/GroundingSupport.md create mode 100644 docs/api/google/generativeai/protos/LogprobsResult.md create mode 100644 docs/api/google/generativeai/protos/LogprobsResult/Candidate.md create mode 100644 docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md create mode 100644 docs/api/google/generativeai/protos/PredictRequest.md create mode 100644 docs/api/google/generativeai/protos/PredictResponse.md create mode 100644 docs/api/google/generativeai/protos/RetrievalMetadata.md create mode 100644 docs/api/google/generativeai/protos/SearchEntryPoint.md create mode 100644 docs/api/google/generativeai/protos/Segment.md delete mode 100644 docs/api/google/generativeai/types/AuthorError.md delete mode 100644 
docs/api/google/generativeai/types/ChatResponse.md delete mode 100644 docs/api/google/generativeai/types/Completion.md delete mode 100644 docs/api/google/generativeai/types/ExampleDict.md delete mode 100644 docs/api/google/generativeai/types/ExampleOptions.md delete mode 100644 docs/api/google/generativeai/types/ExamplesOptions.md delete mode 100644 docs/api/google/generativeai/types/MessageDict.md delete mode 100644 docs/api/google/generativeai/types/MessageOptions.md delete mode 100644 docs/api/google/generativeai/types/MessagePromptDict.md delete mode 100644 docs/api/google/generativeai/types/MessagePromptOptions.md delete mode 100644 docs/api/google/generativeai/types/MessagesOptions.md delete mode 100644 docs/api/google/generativeai/types/ResponseDict.md diff --git a/docs/api/google/generativeai.md b/docs/api/google/generativeai.md index 23ee47866..5b3931f08 100644 --- a/docs/api/google/generativeai.md +++ b/docs/api/google/generativeai.md @@ -1,17 +1,9 @@ -description: Google AI Python SDK - -

# Module: google.generativeai - + diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md index ac58e2e4b..442c59e02 100644 --- a/docs/api/google/generativeai/ChatSession.md +++ b/docs/api/google/generativeai/ChatSession.md @@ -1,19 +1,9 @@ -description: Contains an ongoing conversation with the model. - -
- - - - - - -
# google.generativeai.ChatSession - + @@ -84,17 +82,25 @@ A chat history to initialize the object with. + `history`
+ + The chat history. + + `last` + + returns the last received `genai.GenerateContentResponse` + @@ -167,31 +173,47 @@ Like + `content` + + The message contents. + + `generation_config` + + Overrides for the model's generation config. + + `safety_settings` + + Overrides for the model's safety settings. + + `stream` + + If True, yield response chunks as they are generated. + diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md index 71f293ebe..f9b0ccb7d 100644 --- a/docs/api/google/generativeai/GenerativeModel.md +++ b/docs/api/google/generativeai/GenerativeModel.md @@ -1,22 +1,9 @@ -description: The genai.GenerativeModel class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat. - -
- - - - - - - - - -
# google.generativeai.GenerativeModel - + @@ -128,17 +127,25 @@ use. + `cached_content` + + + + `model_name` + + + @@ -209,24 +216,36 @@ Creates a model with `cached_content` as model's context. + `cached_content` + + context for the model. + + `generation_config` + + Overrides for the model's generation config. + + `safety_settings` + + Overrides for the model's safety settings. + @@ -239,7 +258,9 @@ Overrides for the model's safety settings. Returns + `GenerativeModel` object with `cached_content` as its context. + @@ -323,45 +344,69 @@ But note that an `Iterable[protos.Part]` is taken as the parts of a single messa + `contents` + + The contents serving as the model's prompt. + + `generation_config` + + Overrides for the model's generation config. + + `safety_settings` + + Overrides for the model's safety settings. + + `stream` + + If True, yield response chunks as they are generated. + + `tools` + + `protos.Tools` more info coming soon. + + `request_options` + + Options for the request. + @@ -415,10 +460,14 @@ Returns a `genai.ChatSession` attached to this model. + `history` + + An iterable of protos.Content objects, or equivalents to initialize the session. 
+ diff --git a/docs/api/google/generativeai/_api_cache.json b/docs/api/google/generativeai/_api_cache.json index a1d446e53..518937a44 100644 --- a/docs/api/google/generativeai/_api_cache.json +++ b/docs/api/google/generativeai/_api_cache.json @@ -1,149 +1,154 @@ { "duplicate_of": { - "google.generativeai.ChatSession.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.ChatSession.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.ChatSession.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.ChatSession.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.ChatSession.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.ChatSession.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.ChatSession.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.ChatSession.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.ChatSession.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.ChatSession.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.ChatSession.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.ChatSession.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.ChatSession.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.ChatSession.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.GenerationConfig": "google.generativeai.types.GenerationConfig", "google.generativeai.GenerationConfig.__eq__": "google.generativeai.types.GenerationConfig.__eq__", - 
"google.generativeai.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.GenerationConfig.__init__": "google.generativeai.types.GenerationConfig.__init__", - "google.generativeai.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.GenerativeModel.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.GenerativeModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.GenerativeModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.GenerativeModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.GenerativeModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.GenerativeModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.GenerativeModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.annotations": "google.generativeai.types.annotations", + 
"google.generativeai.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.GenerativeModel.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.GenerativeModel.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.GenerativeModel.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.GenerativeModel.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.GenerativeModel.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.GenerativeModel.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.GenerativeModel.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.annotations": "google.generativeai.caching.annotations", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.AttributionSourceId.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.AttributionSourceId.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.AttributionSourceId.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.AttributionSourceId.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.AttributionSourceId.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchCreateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + 
"google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchCreateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchCreateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchCreateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchCreateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - 
"google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchCreateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchDeleteChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchEmbedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", 
"google.generativeai.protos.BatchEmbedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchEmbedContentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchEmbedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchEmbedContentsResponse.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchEmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchEmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchEmbedTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchEmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchEmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchEmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchEmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchEmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.BatchUpdateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", 
"google.generativeai.protos.BatchUpdateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Blob.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Blob.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Blob.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Blob.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Blob.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Blob.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Blob.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Blob.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.Blob.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Blob.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Blob.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Blob.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Blob.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CachedContent.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CachedContent.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CachedContent.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CachedContent.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CachedContent.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CachedContent.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CachedContent.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CachedContent.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Candidate.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Candidate.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Candidate.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Candidate.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Candidate.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Candidate.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Candidate.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Chunk.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.Chunk.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.Chunk.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -187,49 +192,50 @@ "google.generativeai.protos.Chunk.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.Chunk.State.denominator": 
"google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.Chunk.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Chunk.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.Chunk.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.Chunk.State.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.Chunk.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.Chunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Chunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Chunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Chunk.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Chunk.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Chunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Chunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Chunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Chunk.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Chunk.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Chunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Chunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.Chunk.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ChunkData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ChunkData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ChunkData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ChunkData.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ChunkData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ChunkData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ChunkData.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CitationMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CitationMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CitationMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CitationSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CitationSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CitationSource.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CitationSource.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CitationSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CitationSource.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CitationSource.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CodeExecution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CodeExecution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.caching.CachedContent.__lt__", 
"google.generativeai.protos.CodeExecution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -273,17 +279,18 @@ "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.CodeExecutionResult.Outcome.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.CodeExecutionResult.Outcome.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.CodeExecutionResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CodeExecutionResult.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CodeExecutionResult.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CodeExecutionResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CodeExecutionResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Condition.Operator.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.Condition.Operator.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.Condition.Operator.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -327,33 +334,34 @@ "google.generativeai.protos.Condition.Operator.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.Condition.Operator.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.Condition.Operator.imag": 
"google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Condition.Operator.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.Condition.Operator.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.Condition.Operator.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.Condition.Operator.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.Condition.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Condition.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Condition.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Condition.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Condition.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Condition.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Condition.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Condition.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Condition.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Condition.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Condition.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Condition.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Condition.__new__": 
"google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Content.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Content.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Content.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Content.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Content.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Content.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Content.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Content.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Content.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Content.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Content.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Content.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Content.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ContentEmbedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ContentEmbedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ContentEmbedding.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ContentEmbedding.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ContentEmbedding.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ContentEmbedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ContentEmbedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ContentFilter.BlockedReason": "google.generativeai.types.BlockedReason", "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.ContentFilter.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", @@ -398,273 +406,329 @@ "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.ContentFilter.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.ContentFilter.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": 
"google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.ContentFilter.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.ContentFilter.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.ContentFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ContentFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ContentFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ContentFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ContentFilter.__new__": 
"google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Corpus.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Corpus.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Corpus.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Corpus.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Corpus.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Corpus.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Corpus.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Corpus.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Corpus.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Corpus.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Corpus.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Corpus.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Corpus.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountMessageTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CountMessageTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountMessageTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountMessageTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", 
"google.generativeai.protos.CountMessageTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountMessageTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountTextTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CountTextTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - 
"google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountTextTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountTextTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CountTextTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CountTextTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountTextTokensResponse.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountTextTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountTokensRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CountTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CountTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CountTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CountTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateCachedContentRequest.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateChunkRequest.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateCorpusRequest.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", 
+ "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateFileResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateFileResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateFileResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateFileResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateFileResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreatePermissionRequest.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateTunedModelMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateTunedModelMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateTunedModelMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateTunedModelMetadata.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateTunedModelMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateTunedModelMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateTunedModelMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CreateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CreateTunedModelRequest.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CreateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.CustomMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.CustomMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.CustomMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Dataset.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Dataset.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Dataset.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Dataset.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Dataset.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Dataset.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Dataset.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Dataset.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Dataset.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Dataset.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Dataset.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Dataset.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.Dataset.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeleteCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteCorpusRequest.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - 
"google.generativeai.protos.DeleteDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeleteFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteFileRequest.__le__": 
"google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeletePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeletePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeletePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - 
"google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.DeleteTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.DeleteTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.DeleteTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Document.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Document.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Document.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Document.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Document.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Document.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Document.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Document.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Document.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Document.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Document.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Document.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Document.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__", + 
"google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__", + 
"google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__", + 
"google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.protos.DynamicRetrievalConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.DynamicRetrievalConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + 
"google.generativeai.protos.DynamicRetrievalConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.DynamicRetrievalConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.DynamicRetrievalConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.DynamicRetrievalConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.DynamicRetrievalConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.DynamicRetrievalConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.EmbedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.EmbedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", 
"google.generativeai.protos.EmbedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.EmbedContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.EmbedContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.EmbedContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.EmbedContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedContentResponse.__new__": 
"google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.EmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.EmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.EmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.EmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.EmbedTextResponse.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.EmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.EmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.EmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Embedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Embedding.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Embedding.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Embedding.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Embedding.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Embedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - 
"google.generativeai.protos.Embedding.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Embedding.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Embedding.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Embedding.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Embedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Embedding.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Embedding.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Example.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Example.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Example.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Example.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Example.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Example.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Example.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Example.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Example.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Example.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Example.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Example.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Example.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ExecutableCode.Language.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.ExecutableCode.Language.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.ExecutableCode.Language.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -708,17 +772,18 @@ "google.generativeai.protos.ExecutableCode.Language.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.ExecutableCode.Language.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.ExecutableCode.Language.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.ExecutableCode.Language.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.ExecutableCode.Language.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.ExecutableCode.Language.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.ExecutableCode.Language.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.ExecutableCode.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ExecutableCode.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ExecutableCode.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ExecutableCode.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ExecutableCode.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.File.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.File.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.File.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -762,33 +827,34 @@ "google.generativeai.protos.File.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.File.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.File.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.File.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", 
"google.generativeai.protos.File.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.File.State.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.File.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.File.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.File.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.File.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.File.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.File.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.File.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.File.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.File.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.File.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.FileData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.FileData.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.FileData.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FileData.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.FileData.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.FileData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.FileData.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.FileData.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FileData.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.FileData.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.FileData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.FileData.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FileData.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.FunctionCall.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.FunctionCall.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.FunctionCall.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.FunctionCall.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.FunctionCall.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -832,33 +898,34 @@ "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.FunctionCallingConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", 
"google.generativeai.protos.FunctionCallingConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.FunctionCallingConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.FunctionCallingConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.FunctionCallingConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.FunctionDeclaration.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.FunctionDeclaration.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.FunctionDeclaration.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.FunctionResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.FunctionResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.FunctionResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -902,17 +969,18 @@ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": 
"google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.GenerateAnswerRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateAnswerRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateAnswerRequest.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -956,33 +1024,34 @@ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateAnswerResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - 
"google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateAnswerResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateAnswerResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateContentRequest.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -1026,161 +1095,202 @@ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": 
"google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateContentResponse.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateMessageRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateMessageRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateMessageRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateMessageResponse.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateMessageResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateMessageResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateTextRequest.__gt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerateTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerateTextResponse.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerateTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GenerationConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GenerationConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GenerationConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetCachedContentRequest.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetCorpusRequest.__eq__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - 
"google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetModelRequest.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetPermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetPermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetPermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GetTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GetTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GetTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.GoogleSearchRetrieval.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GoogleSearchRetrieval.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GoogleSearchRetrieval.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.GoogleSearchRetrieval.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GoogleSearchRetrieval.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GoogleSearchRetrieval.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.GoogleSearchRetrieval.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GoogleSearchRetrieval.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GroundingAttribution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GroundingAttribution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GroundingAttribution.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingAttribution.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GroundingAttribution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.GroundingChunk.Web.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingChunk.Web.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingChunk.Web.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.GroundingChunk.Web.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingChunk.Web.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingChunk.Web.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.GroundingChunk.Web.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingChunk.Web.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.GroundingChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.GroundingChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingChunk.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.protos.GroundingChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.GroundingChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingChunk.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.GroundingMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.GroundingMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.GroundingMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GroundingPassage.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GroundingPassage.__init__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GroundingPassage.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.GroundingPassages.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.GroundingPassages.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.GroundingPassages.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.GroundingSupport.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.GroundingSupport.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.GroundingSupport.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.GroundingSupport.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.GroundingSupport.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.GroundingSupport.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.GroundingSupport.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.GroundingSupport.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -1224,185 +1334,210 @@ "google.generativeai.protos.HarmCategory.conjugate": 
"google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.Hyperparameters.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Hyperparameters.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.caching.CachedContent.__lt__", 
"google.generativeai.protos.Hyperparameters.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListCachedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListCachedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListCachedContentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCachedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListCachedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListCachedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListCachedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListCachedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListChunksRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + 
"google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListCorporaRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListCorporaRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListCorporaRequest.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCorporaRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListCorporaRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListCorporaResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListCorporaResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListCorporaResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListCorporaResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListCorporaResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListDocumentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListDocumentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListDocumentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListDocumentsRequest.__new__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListDocumentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListDocumentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListDocumentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListDocumentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListFilesRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListFilesRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListFilesRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListFilesResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + 
"google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListFilesResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListFilesResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListFilesResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListFilesResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - 
"google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", 
"google.generativeai.protos.ListModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListPermissionsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListPermissionsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListPermissionsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListPermissionsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListPermissionsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListPermissionsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListPermissionsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListPermissionsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListTunedModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - 
"google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListTunedModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListTunedModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListTunedModelsRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListTunedModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.ListTunedModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ListTunedModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ListTunedModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.LogprobsResult.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.LogprobsResult.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.LogprobsResult.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.LogprobsResult.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.LogprobsResult.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.LogprobsResult.Candidate.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.LogprobsResult.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.LogprobsResult.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.LogprobsResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.LogprobsResult.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.LogprobsResult.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.LogprobsResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.LogprobsResult.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.LogprobsResult.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.LogprobsResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.LogprobsResult.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Message.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Message.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Message.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Message.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Message.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Message.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Message.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Message.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Message.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Message.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Message.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Message.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Message.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.MessagePrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.MessagePrompt.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.MessagePrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.MessagePrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.MessagePrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.MetadataFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__", 
"google.generativeai.protos.MetadataFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.MetadataFilter.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.MetadataFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.MetadataFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Model.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Model.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Model.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.protos.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Model.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Model.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Part.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Part.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Part.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Part.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Part.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Part.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Part.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Part.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Part.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Part.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Part.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Part.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Part.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Permission.GranteeType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", 
"google.generativeai.protos.Permission.GranteeType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.Permission.GranteeType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -1446,6 +1581,7 @@ "google.generativeai.protos.Permission.GranteeType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.Permission.GranteeType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.Permission.GranteeType.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Permission.GranteeType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.Permission.GranteeType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.Permission.GranteeType.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.Permission.GranteeType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", @@ -1492,65 +1628,90 @@ "google.generativeai.protos.Permission.Role.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.Permission.Role.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.Permission.Role.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Permission.Role.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.Permission.Role.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.Permission.Role.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.Permission.Role.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", 
"google.generativeai.protos.Permission.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Permission.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Permission.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Permission.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Permission.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Permission.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.PredictRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.PredictRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.PredictRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.PredictRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + 
"google.generativeai.protos.PredictRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.PredictRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.PredictRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.PredictRequest.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.PredictResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.PredictResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.PredictResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.PredictResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.PredictResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.PredictResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.PredictResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.PredictResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.QueryCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.QueryCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.QueryCorpusRequest.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.QueryCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.QueryCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.QueryCorpusResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.QueryCorpusResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - 
"google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.QueryCorpusResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.QueryDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.QueryDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.QueryDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.QueryDocumentRequest.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.QueryDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.QueryDocumentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.QueryDocumentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.QueryDocumentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + 
"google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.RelevantChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.RelevantChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.RelevantChunk.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.RelevantChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.RelevantChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.RetrievalMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.RetrievalMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.RetrievalMetadata.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.RetrievalMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.RetrievalMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.RetrievalMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.RetrievalMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.RetrievalMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.SafetyFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.SafetyFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.SafetyFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - 
"google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.SafetyRating.HarmProbability": "google.generativeai.types.HarmProbability", "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.SafetyRating.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", @@ -1595,17 +1756,18 @@ "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.SafetyRating.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.SafetyRating.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.SafetyRating.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.SafetyRating.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.SafetyRating.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.SafetyRating.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetyRating.__ge__": 
"google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.SafetyRating.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.SafetyRating.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.SafetySetting.HarmBlockThreshold": "google.generativeai.types.HarmBlockThreshold", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", @@ -1650,49 +1812,66 @@ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": 
"google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.SafetySetting.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.SafetySetting.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.SafetySetting.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.SafetySetting.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.SafetySetting.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.SafetySetting.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SafetySetting.__new__": 
"google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Schema.PropertiesEntry.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Schema.PropertiesEntry.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Schema.PropertiesEntry.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Schema.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Schema.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - 
"google.generativeai.protos.Schema.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Schema.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Schema.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Schema.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Schema.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Schema.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Schema.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Schema.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Schema.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Schema.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Schema.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.SearchEntryPoint.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.SearchEntryPoint.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.SearchEntryPoint.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.SearchEntryPoint.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.SearchEntryPoint.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.SearchEntryPoint.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.SearchEntryPoint.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.SearchEntryPoint.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.protos.Segment.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", + "google.generativeai.protos.Segment.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Segment.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.protos.Segment.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", + "google.generativeai.protos.Segment.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Segment.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.protos.Segment.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", + "google.generativeai.protos.Segment.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.SemanticRetrieverConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.SemanticRetrieverConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - 
"google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.SemanticRetrieverConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.StringList.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.StringList.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.StringList.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.StringList.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.StringList.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.StringList.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.StringList.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.StringList.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.StringList.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.StringList.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.StringList.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.StringList.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.StringList.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TaskType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.TaskType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.protos.TaskType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -1736,57 +1915,58 @@ "google.generativeai.protos.TaskType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.TaskType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.TaskType.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.TaskType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.TaskType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.TaskType.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.TaskType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.TextCompletion.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TextCompletion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TextCompletion.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TextCompletion.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TextCompletion.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TextPrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TextPrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TextPrompt.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.TextPrompt.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TextPrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Tool.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.Tool.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.Tool.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.Tool.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.Tool.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.Tool.__new__": "google.generativeai.caching.CachedContent.__new__", 
"google.generativeai.protos.ToolConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.ToolConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.ToolConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TransferOwnershipRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + 
"google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TransferOwnershipRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TransferOwnershipRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TransferOwnershipResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", 
"google.generativeai.protos.TransferOwnershipResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TransferOwnershipResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TunedModel.State": "google.generativeai.types.TunedModelState", "google.generativeai.protos.TunedModel.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.TunedModel.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", @@ -1831,57 +2011,58 @@ "google.generativeai.protos.TunedModel.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.TunedModel.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.TunedModel.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.TunedModel.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.TunedModel.State.numerator": 
"google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.TunedModel.State.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.TunedModel.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.TunedModel.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TunedModel.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TunedModel.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TunedModelSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TunedModelSource.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TunedModelSource.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TunedModelSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TunedModelSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TunedModelSource.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TunedModelSource.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TuningExample.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TuningExample.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TuningExample.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TuningExample.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TuningExample.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningExample.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TuningExample.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TuningExample.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningExample.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TuningExamples.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TuningExamples.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TuningExamples.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TuningExamples.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TuningExamples.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TuningSnapshot.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TuningSnapshot.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TuningSnapshot.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - 
"google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.TuningTask.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.TuningTask.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.TuningTask.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.TuningTask.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.TuningTask.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.TuningTask.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.TuningTask.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.TuningTask.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.TuningTask.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.Type.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.protos.Type.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", 
"google.generativeai.protos.Type.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -1925,87 +2106,79 @@ "google.generativeai.protos.Type.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.protos.Type.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.protos.Type.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.protos.Type.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.protos.Type.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.protos.Type.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.protos.Type.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", "google.generativeai.protos.UpdateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + 
"google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.UpdateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdateChunkRequest.__ne__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.UpdateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.UpdateCorpusRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.UpdateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__", 
"google.generativeai.protos.UpdateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.UpdatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - 
"google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.UpdateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__", + 
"google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.UpdateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.UpdateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.protos.VideoMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__", - "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.protos.VideoMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__", - "google.generativeai.protos.VideoMetadata.__le__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", + "google.generativeai.protos.VideoMetadata.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__", "google.generativeai.protos.VideoMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__", - "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.AsyncGenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.AuthorError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.AuthorError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.AuthorError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.AuthorError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.AuthorError.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.AuthorError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.BlockedPromptException.__init__": "google.generativeai.types.AuthorError.__init__", - "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.BlockedPromptException.__new__": "google.generativeai.types.AuthorError.__new__", - "google.generativeai.types.BlockedPromptException.add_note": "google.generativeai.types.AuthorError.add_note", - "google.generativeai.types.BlockedPromptException.args": "google.generativeai.types.AuthorError.args", - "google.generativeai.types.BlockedPromptException.with_traceback": "google.generativeai.types.AuthorError.with_traceback", + "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.AsyncGenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.AsyncGenerateContentResponse.__gt__": 
"google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.AsyncGenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.caching.CachedContent.__ne__", "google.generativeai.types.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.types.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.types.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -2054,38 +2227,33 @@ "google.generativeai.types.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.types.BlockedReason.from_bytes": "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes", "google.generativeai.types.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.BlockedReason.is_integer": 
"google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.types.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.types.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.types.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", - "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.AuthorError.__init__", - "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.AuthorError.__new__", - "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.AuthorError.add_note", - "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.AuthorError.args", - "google.generativeai.types.BrokenResponseError.with_traceback": "google.generativeai.types.AuthorError.with_traceback", - "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.CallableFunctionDeclaration.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.CallableFunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.BlockedPromptException.__init__", + "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.BlockedPromptException.__new__", + "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.BlockedPromptException.add_note", + "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.BlockedPromptException.args", + "google.generativeai.types.BrokenResponseError.with_traceback": 
"google.generativeai.types.BlockedPromptException.with_traceback", + "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.CallableFunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.CallableFunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.CallableFunctionDeclaration.description": "google.generativeai.types.FunctionDeclaration.description", "google.generativeai.types.CallableFunctionDeclaration.from_function": "google.generativeai.types.FunctionDeclaration.from_function", "google.generativeai.types.CallableFunctionDeclaration.name": "google.generativeai.types.FunctionDeclaration.name", "google.generativeai.types.CallableFunctionDeclaration.parameters": "google.generativeai.types.FunctionDeclaration.parameters", "google.generativeai.types.CallableFunctionDeclaration.to_proto": "google.generativeai.types.FunctionDeclaration.to_proto", - "google.generativeai.types.ChatResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.ChatResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.ChatResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.ChatResponse.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.ChatResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.ChatResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", "google.generativeai.types.CitationMetadataDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.CitationMetadataDict.__eq__": "google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.CitationMetadataDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2134,13 +2302,6 @@ "google.generativeai.types.CitationSourceDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.CitationSourceDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.CitationSourceDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.Completion.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.Completion.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.Completion.__init__": "google.generativeai.types.ChatResponse.__init__", - "google.generativeai.types.Completion.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.Completion.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.Completion.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.Completion.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", "google.generativeai.types.ContentDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.ContentDict.__eq__": 
"google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.ContentDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2189,37 +2350,13 @@ "google.generativeai.types.ContentFilterDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.ContentFilterDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.ContentFilterDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.ExampleDict.__contains__": "google.generativeai.types.BlobDict.__contains__", - "google.generativeai.types.ExampleDict.__eq__": "google.generativeai.types.BlobDict.__eq__", - "google.generativeai.types.ExampleDict.__ge__": "google.generativeai.types.BlobDict.__ge__", - "google.generativeai.types.ExampleDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", - "google.generativeai.types.ExampleDict.__gt__": "google.generativeai.types.BlobDict.__gt__", - "google.generativeai.types.ExampleDict.__init__": "google.generativeai.types.BlobDict.__init__", - "google.generativeai.types.ExampleDict.__iter__": "google.generativeai.types.BlobDict.__iter__", - "google.generativeai.types.ExampleDict.__le__": "google.generativeai.types.BlobDict.__le__", - "google.generativeai.types.ExampleDict.__len__": "google.generativeai.types.BlobDict.__len__", - "google.generativeai.types.ExampleDict.__lt__": "google.generativeai.types.BlobDict.__lt__", - "google.generativeai.types.ExampleDict.__ne__": "google.generativeai.types.BlobDict.__ne__", - "google.generativeai.types.ExampleDict.__new__": "google.generativeai.types.BlobDict.__new__", - "google.generativeai.types.ExampleDict.__or__": "google.generativeai.types.BlobDict.__or__", - "google.generativeai.types.ExampleDict.__ror__": "google.generativeai.types.BlobDict.__ror__", - "google.generativeai.types.ExampleDict.clear": "google.generativeai.types.BlobDict.clear", - "google.generativeai.types.ExampleDict.copy": 
"google.generativeai.types.BlobDict.copy", - "google.generativeai.types.ExampleDict.get": "google.generativeai.types.BlobDict.get", - "google.generativeai.types.ExampleDict.items": "google.generativeai.types.BlobDict.items", - "google.generativeai.types.ExampleDict.keys": "google.generativeai.types.BlobDict.keys", - "google.generativeai.types.ExampleDict.pop": "google.generativeai.types.BlobDict.pop", - "google.generativeai.types.ExampleDict.popitem": "google.generativeai.types.BlobDict.popitem", - "google.generativeai.types.ExampleDict.setdefault": "google.generativeai.types.BlobDict.setdefault", - "google.generativeai.types.ExampleDict.update": "google.generativeai.types.BlobDict.update", - "google.generativeai.types.ExampleDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.File.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.File.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.File.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.File.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.File.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.File.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.File.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.File.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.File.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.File.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.File.__le__": "google.generativeai.caching.CachedContent.__le__", + 
"google.generativeai.types.File.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.File.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.File.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.FileDataDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.FileDataDict.__eq__": "google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.FileDataDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2244,40 +2381,40 @@ "google.generativeai.types.FileDataDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.FileDataDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.FileDataDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.FunctionLibrary.__ge__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", + "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.caching.CachedContent.__eq__", + 
"google.generativeai.types.FunctionLibrary.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__", "google.generativeai.types.GenerateContentResponse.__init__": "google.generativeai.types.AsyncGenerateContentResponse.__init__", - "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__", + 
"google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.GenerateContentResponse.candidates": "google.generativeai.types.AsyncGenerateContentResponse.candidates", "google.generativeai.types.GenerateContentResponse.parts": "google.generativeai.types.AsyncGenerateContentResponse.parts", "google.generativeai.types.GenerateContentResponse.prompt_feedback": "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback", "google.generativeai.types.GenerateContentResponse.text": "google.generativeai.types.AsyncGenerateContentResponse.text", "google.generativeai.types.GenerateContentResponse.to_dict": "google.generativeai.types.AsyncGenerateContentResponse.to_dict", "google.generativeai.types.GenerateContentResponse.usage_metadata": "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata", - "google.generativeai.types.GenerationConfig.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__", 
+ "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.GenerationConfigDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.GenerationConfigDict.__eq__": "google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.GenerationConfigDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2350,6 +2487,7 @@ "google.generativeai.types.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.types.HarmBlockThreshold.from_bytes": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes", "google.generativeai.types.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmBlockThreshold.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.types.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.types.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.types.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", @@ -2396,6 +2534,7 @@ "google.generativeai.types.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate", "google.generativeai.types.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.types.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", 
"google.generativeai.types.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.types.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.types.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", @@ -2447,74 +2586,27 @@ "google.generativeai.types.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.types.HarmProbability.from_bytes": "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes", "google.generativeai.types.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.types.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.types.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real", "google.generativeai.types.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", - "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.AuthorError.__init__", - "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.IncompleteIterationError.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - 
"google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.AuthorError.__new__", - "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.AuthorError.add_note", - "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.AuthorError.args", - "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.AuthorError.with_traceback", - "google.generativeai.types.MessageDict.__contains__": "google.generativeai.types.BlobDict.__contains__", - "google.generativeai.types.MessageDict.__eq__": "google.generativeai.types.BlobDict.__eq__", - "google.generativeai.types.MessageDict.__ge__": "google.generativeai.types.BlobDict.__ge__", - "google.generativeai.types.MessageDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", - "google.generativeai.types.MessageDict.__gt__": "google.generativeai.types.BlobDict.__gt__", - "google.generativeai.types.MessageDict.__init__": "google.generativeai.types.BlobDict.__init__", - "google.generativeai.types.MessageDict.__iter__": "google.generativeai.types.BlobDict.__iter__", - "google.generativeai.types.MessageDict.__le__": "google.generativeai.types.BlobDict.__le__", - "google.generativeai.types.MessageDict.__len__": "google.generativeai.types.BlobDict.__len__", - "google.generativeai.types.MessageDict.__lt__": "google.generativeai.types.BlobDict.__lt__", - "google.generativeai.types.MessageDict.__ne__": "google.generativeai.types.BlobDict.__ne__", - "google.generativeai.types.MessageDict.__new__": "google.generativeai.types.BlobDict.__new__", - "google.generativeai.types.MessageDict.__or__": "google.generativeai.types.BlobDict.__or__", - "google.generativeai.types.MessageDict.__ror__": "google.generativeai.types.BlobDict.__ror__", - 
"google.generativeai.types.MessageDict.clear": "google.generativeai.types.BlobDict.clear", - "google.generativeai.types.MessageDict.copy": "google.generativeai.types.BlobDict.copy", - "google.generativeai.types.MessageDict.get": "google.generativeai.types.BlobDict.get", - "google.generativeai.types.MessageDict.items": "google.generativeai.types.BlobDict.items", - "google.generativeai.types.MessageDict.keys": "google.generativeai.types.BlobDict.keys", - "google.generativeai.types.MessageDict.pop": "google.generativeai.types.BlobDict.pop", - "google.generativeai.types.MessageDict.popitem": "google.generativeai.types.BlobDict.popitem", - "google.generativeai.types.MessageDict.setdefault": "google.generativeai.types.BlobDict.setdefault", - "google.generativeai.types.MessageDict.update": "google.generativeai.types.BlobDict.update", - "google.generativeai.types.MessageDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.MessagePromptDict.__contains__": "google.generativeai.types.BlobDict.__contains__", - "google.generativeai.types.MessagePromptDict.__eq__": "google.generativeai.types.BlobDict.__eq__", - "google.generativeai.types.MessagePromptDict.__ge__": "google.generativeai.types.BlobDict.__ge__", - "google.generativeai.types.MessagePromptDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", - "google.generativeai.types.MessagePromptDict.__gt__": "google.generativeai.types.BlobDict.__gt__", - "google.generativeai.types.MessagePromptDict.__init__": "google.generativeai.types.BlobDict.__init__", - "google.generativeai.types.MessagePromptDict.__iter__": "google.generativeai.types.BlobDict.__iter__", - "google.generativeai.types.MessagePromptDict.__le__": "google.generativeai.types.BlobDict.__le__", - "google.generativeai.types.MessagePromptDict.__len__": "google.generativeai.types.BlobDict.__len__", - "google.generativeai.types.MessagePromptDict.__lt__": "google.generativeai.types.BlobDict.__lt__", - 
"google.generativeai.types.MessagePromptDict.__ne__": "google.generativeai.types.BlobDict.__ne__", - "google.generativeai.types.MessagePromptDict.__new__": "google.generativeai.types.BlobDict.__new__", - "google.generativeai.types.MessagePromptDict.__or__": "google.generativeai.types.BlobDict.__or__", - "google.generativeai.types.MessagePromptDict.__ror__": "google.generativeai.types.BlobDict.__ror__", - "google.generativeai.types.MessagePromptDict.clear": "google.generativeai.types.BlobDict.clear", - "google.generativeai.types.MessagePromptDict.copy": "google.generativeai.types.BlobDict.copy", - "google.generativeai.types.MessagePromptDict.get": "google.generativeai.types.BlobDict.get", - "google.generativeai.types.MessagePromptDict.items": "google.generativeai.types.BlobDict.items", - "google.generativeai.types.MessagePromptDict.keys": "google.generativeai.types.BlobDict.keys", - "google.generativeai.types.MessagePromptDict.pop": "google.generativeai.types.BlobDict.pop", - "google.generativeai.types.MessagePromptDict.popitem": "google.generativeai.types.BlobDict.popitem", - "google.generativeai.types.MessagePromptDict.setdefault": "google.generativeai.types.BlobDict.setdefault", - "google.generativeai.types.MessagePromptDict.update": "google.generativeai.types.BlobDict.update", - "google.generativeai.types.MessagePromptDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.Model.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.Model.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.Model.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.Model.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.Model.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - 
"google.generativeai.types.Model.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.BlockedPromptException.__init__", + "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.IncompleteIterationError.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.BlockedPromptException.__new__", + "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.BlockedPromptException.add_note", + "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.BlockedPromptException.args", + "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback", + "google.generativeai.types.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.Model.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.Model.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.Model.__new__": 
"google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.ModelNameOptions": "google.generativeai.types.AnyModelNameOptions", "google.generativeai.types.PartDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.PartDict.__eq__": "google.generativeai.types.BlobDict.__eq__", @@ -2540,49 +2632,25 @@ "google.generativeai.types.PartDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.PartDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.PartDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.Permission.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.Permission.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.Permission.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.Permission.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.Permission.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.Permission.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.Permissions.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.Permissions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.Permissions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.Permissions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.Permissions.__lt__": 
"google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.Permissions.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.Permissions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.RequestOptions.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.RequestOptions.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", - "google.generativeai.types.ResponseDict.__contains__": "google.generativeai.types.BlobDict.__contains__", - "google.generativeai.types.ResponseDict.__eq__": "google.generativeai.types.BlobDict.__eq__", - "google.generativeai.types.ResponseDict.__ge__": "google.generativeai.types.BlobDict.__ge__", - "google.generativeai.types.ResponseDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__", - "google.generativeai.types.ResponseDict.__gt__": "google.generativeai.types.BlobDict.__gt__", - "google.generativeai.types.ResponseDict.__init__": "google.generativeai.types.BlobDict.__init__", - "google.generativeai.types.ResponseDict.__iter__": "google.generativeai.types.BlobDict.__iter__", - "google.generativeai.types.ResponseDict.__le__": "google.generativeai.types.BlobDict.__le__", - "google.generativeai.types.ResponseDict.__len__": "google.generativeai.types.BlobDict.__len__", - 
"google.generativeai.types.ResponseDict.__lt__": "google.generativeai.types.BlobDict.__lt__", - "google.generativeai.types.ResponseDict.__ne__": "google.generativeai.types.BlobDict.__ne__", - "google.generativeai.types.ResponseDict.__new__": "google.generativeai.types.BlobDict.__new__", - "google.generativeai.types.ResponseDict.__or__": "google.generativeai.types.BlobDict.__or__", - "google.generativeai.types.ResponseDict.__ror__": "google.generativeai.types.BlobDict.__ror__", - "google.generativeai.types.ResponseDict.clear": "google.generativeai.types.BlobDict.clear", - "google.generativeai.types.ResponseDict.copy": "google.generativeai.types.BlobDict.copy", - "google.generativeai.types.ResponseDict.get": "google.generativeai.types.BlobDict.get", - "google.generativeai.types.ResponseDict.items": "google.generativeai.types.BlobDict.items", - "google.generativeai.types.ResponseDict.keys": "google.generativeai.types.BlobDict.keys", - "google.generativeai.types.ResponseDict.pop": "google.generativeai.types.BlobDict.pop", - "google.generativeai.types.ResponseDict.popitem": "google.generativeai.types.BlobDict.popitem", - "google.generativeai.types.ResponseDict.setdefault": "google.generativeai.types.BlobDict.setdefault", - "google.generativeai.types.ResponseDict.update": "google.generativeai.types.BlobDict.update", - "google.generativeai.types.ResponseDict.values": "google.generativeai.types.BlobDict.values", + "google.generativeai.types.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.Permission.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.Permission.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.Permission.__new__": 
"google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.Permissions.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.Permissions.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.Permissions.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.Permissions.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.Permissions.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.Permissions.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.Permissions.__new__": "google.generativeai.caching.CachedContent.__new__", + "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.RequestOptions.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.RequestOptions.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.SafetyFeedbackDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.SafetyFeedbackDict.__eq__": "google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.SafetyFeedbackDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2655,24 +2723,24 @@ "google.generativeai.types.SafetySettingDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.SafetySettingDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.SafetySettingDict.values": 
"google.generativeai.types.BlobDict.values", - "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.AuthorError.__init__", - "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.AuthorError.__new__", - "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.AuthorError.add_note", - "google.generativeai.types.StopCandidateException.args": "google.generativeai.types.AuthorError.args", - "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.AuthorError.with_traceback", - "google.generativeai.types.Tool.__eq__": "google.generativeai.types.AsyncGenerateContentResponse.__eq__", - "google.generativeai.types.Tool.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.Tool.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.Tool.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.Tool.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - 
"google.generativeai.types.Tool.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.Tool.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.BlockedPromptException.__init__", + "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.BlockedPromptException.__new__", + "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.BlockedPromptException.add_note", + "google.generativeai.types.StopCandidateException.args": "google.generativeai.types.BlockedPromptException.args", + "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback", + "google.generativeai.types.Tool.__eq__": "google.generativeai.caching.CachedContent.__eq__", + "google.generativeai.types.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.Tool.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.Tool.__lt__": 
"google.generativeai.caching.CachedContent.__lt__", + "google.generativeai.types.Tool.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.Tool.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.ToolDict.__contains__": "google.generativeai.types.BlobDict.__contains__", "google.generativeai.types.ToolDict.__eq__": "google.generativeai.types.BlobDict.__eq__", "google.generativeai.types.ToolDict.__ge__": "google.generativeai.types.BlobDict.__ge__", @@ -2697,12 +2765,12 @@ "google.generativeai.types.ToolDict.setdefault": "google.generativeai.types.BlobDict.setdefault", "google.generativeai.types.ToolDict.update": "google.generativeai.types.BlobDict.update", "google.generativeai.types.ToolDict.values": "google.generativeai.types.BlobDict.values", - "google.generativeai.types.TunedModel.__ge__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__", - "google.generativeai.types.TunedModel.__gt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__", - "google.generativeai.types.TunedModel.__le__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__", - "google.generativeai.types.TunedModel.__lt__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__", - "google.generativeai.types.TunedModel.__ne__": "google.generativeai.types.AsyncGenerateContentResponse.__ne__", - "google.generativeai.types.TunedModel.__new__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__", + "google.generativeai.types.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__", + "google.generativeai.types.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__", + "google.generativeai.types.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__", + "google.generativeai.types.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__", + 
"google.generativeai.types.TunedModel.__ne__": "google.generativeai.caching.CachedContent.__ne__", + "google.generativeai.types.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__", "google.generativeai.types.TunedModelState.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__", "google.generativeai.types.TunedModelState.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__", "google.generativeai.types.TunedModelState.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__", @@ -2751,9 +2819,11 @@ "google.generativeai.types.TunedModelState.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator", "google.generativeai.types.TunedModelState.from_bytes": "google.generativeai.protos.TunedModel.State.from_bytes", "google.generativeai.types.TunedModelState.imag": "google.generativeai.protos.Candidate.FinishReason.imag", + "google.generativeai.types.TunedModelState.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer", "google.generativeai.types.TunedModelState.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator", "google.generativeai.types.TunedModelState.real": "google.generativeai.protos.Candidate.FinishReason.real", - "google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes" + "google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes", + "google.generativeai.types.annotations": "google.generativeai.caching.annotations" }, "is_fragment": { "google.generativeai": false, @@ -2781,9 +2851,14 @@ "google.generativeai.GenerationConfig.__ne__": true, "google.generativeai.GenerationConfig.__new__": true, "google.generativeai.GenerationConfig.candidate_count": true, + "google.generativeai.GenerationConfig.frequency_penalty": true, + "google.generativeai.GenerationConfig.logprobs": true, 
"google.generativeai.GenerationConfig.max_output_tokens": true, + "google.generativeai.GenerationConfig.presence_penalty": true, + "google.generativeai.GenerationConfig.response_logprobs": true, "google.generativeai.GenerationConfig.response_mime_type": true, "google.generativeai.GenerationConfig.response_schema": true, + "google.generativeai.GenerationConfig.seed": true, "google.generativeai.GenerationConfig.stop_sequences": true, "google.generativeai.GenerationConfig.temperature": true, "google.generativeai.GenerationConfig.top_k": true, @@ -2807,18 +2882,36 @@ "google.generativeai.GenerativeModel.start_chat": true, "google.generativeai.__version__": true, "google.generativeai.annotations": true, - "google.generativeai.chat": false, - "google.generativeai.chat_async": false, + "google.generativeai.caching": false, + "google.generativeai.caching.CachedContent": false, + "google.generativeai.caching.CachedContent.__eq__": true, + "google.generativeai.caching.CachedContent.__ge__": true, + "google.generativeai.caching.CachedContent.__gt__": true, + "google.generativeai.caching.CachedContent.__init__": true, + "google.generativeai.caching.CachedContent.__le__": true, + "google.generativeai.caching.CachedContent.__lt__": true, + "google.generativeai.caching.CachedContent.__ne__": true, + "google.generativeai.caching.CachedContent.__new__": true, + "google.generativeai.caching.CachedContent.create": true, + "google.generativeai.caching.CachedContent.create_time": true, + "google.generativeai.caching.CachedContent.delete": true, + "google.generativeai.caching.CachedContent.display_name": true, + "google.generativeai.caching.CachedContent.expire_time": true, + "google.generativeai.caching.CachedContent.get": true, + "google.generativeai.caching.CachedContent.list": true, + "google.generativeai.caching.CachedContent.model": true, + "google.generativeai.caching.CachedContent.name": true, + "google.generativeai.caching.CachedContent.update": true, + 
"google.generativeai.caching.CachedContent.update_time": true, + "google.generativeai.caching.CachedContent.usage_metadata": true, + "google.generativeai.caching.annotations": true, + "google.generativeai.caching.get_default_cache_client": false, "google.generativeai.configure": false, - "google.generativeai.count_message_tokens": false, - "google.generativeai.count_text_tokens": false, "google.generativeai.create_tuned_model": false, "google.generativeai.delete_file": false, "google.generativeai.delete_tuned_model": false, "google.generativeai.embed_content": false, "google.generativeai.embed_content_async": false, - "google.generativeai.generate_embeddings": false, - "google.generativeai.generate_text": false, "google.generativeai.get_base_model": false, "google.generativeai.get_file": false, "google.generativeai.get_model": false, @@ -3182,11 +3275,16 @@ "google.generativeai.protos.CachedContent.wrap": true, "google.generativeai.protos.Candidate": false, "google.generativeai.protos.Candidate.FinishReason": false, + "google.generativeai.protos.Candidate.FinishReason.BLOCKLIST": true, "google.generativeai.protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED": true, + "google.generativeai.protos.Candidate.FinishReason.LANGUAGE": true, + "google.generativeai.protos.Candidate.FinishReason.MALFORMED_FUNCTION_CALL": true, "google.generativeai.protos.Candidate.FinishReason.MAX_TOKENS": true, "google.generativeai.protos.Candidate.FinishReason.OTHER": true, + "google.generativeai.protos.Candidate.FinishReason.PROHIBITED_CONTENT": true, "google.generativeai.protos.Candidate.FinishReason.RECITATION": true, "google.generativeai.protos.Candidate.FinishReason.SAFETY": true, + "google.generativeai.protos.Candidate.FinishReason.SPII": true, "google.generativeai.protos.Candidate.FinishReason.STOP": true, "google.generativeai.protos.Candidate.FinishReason.__abs__": true, "google.generativeai.protos.Candidate.FinishReason.__add__": true, @@ -3236,6 +3334,7 @@ 
"google.generativeai.protos.Candidate.FinishReason.denominator": true, "google.generativeai.protos.Candidate.FinishReason.from_bytes": true, "google.generativeai.protos.Candidate.FinishReason.imag": true, + "google.generativeai.protos.Candidate.FinishReason.is_integer": true, "google.generativeai.protos.Candidate.FinishReason.numerator": true, "google.generativeai.protos.Candidate.FinishReason.real": true, "google.generativeai.protos.Candidate.FinishReason.to_bytes": true, @@ -3250,6 +3349,7 @@ "google.generativeai.protos.Candidate.__new__": true, "google.generativeai.protos.Candidate.__or__": true, "google.generativeai.protos.Candidate.__ror__": true, + "google.generativeai.protos.Candidate.avg_logprobs": true, "google.generativeai.protos.Candidate.citation_metadata": true, "google.generativeai.protos.Candidate.content": true, "google.generativeai.protos.Candidate.copy_from": true, @@ -3257,7 +3357,9 @@ "google.generativeai.protos.Candidate.finish_reason": true, "google.generativeai.protos.Candidate.from_json": true, "google.generativeai.protos.Candidate.grounding_attributions": true, + "google.generativeai.protos.Candidate.grounding_metadata": true, "google.generativeai.protos.Candidate.index": true, + "google.generativeai.protos.Candidate.logprobs_result": true, "google.generativeai.protos.Candidate.mro": true, "google.generativeai.protos.Candidate.pb": true, "google.generativeai.protos.Candidate.safety_ratings": true, @@ -3320,6 +3422,7 @@ "google.generativeai.protos.Chunk.State.denominator": true, "google.generativeai.protos.Chunk.State.from_bytes": true, "google.generativeai.protos.Chunk.State.imag": true, + "google.generativeai.protos.Chunk.State.is_integer": true, "google.generativeai.protos.Chunk.State.numerator": true, "google.generativeai.protos.Chunk.State.real": true, "google.generativeai.protos.Chunk.State.to_bytes": true, @@ -3493,6 +3596,7 @@ "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": true, 
"google.generativeai.protos.CodeExecutionResult.Outcome.from_bytes": true, "google.generativeai.protos.CodeExecutionResult.Outcome.imag": true, + "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": true, "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": true, "google.generativeai.protos.CodeExecutionResult.Outcome.real": true, "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": true, @@ -3577,6 +3681,7 @@ "google.generativeai.protos.Condition.Operator.denominator": true, "google.generativeai.protos.Condition.Operator.from_bytes": true, "google.generativeai.protos.Condition.Operator.imag": true, + "google.generativeai.protos.Condition.Operator.is_integer": true, "google.generativeai.protos.Condition.Operator.numerator": true, "google.generativeai.protos.Condition.Operator.real": true, "google.generativeai.protos.Condition.Operator.to_bytes": true, @@ -3701,6 +3806,7 @@ "google.generativeai.protos.ContentFilter.BlockedReason.denominator": true, "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes": true, "google.generativeai.protos.ContentFilter.BlockedReason.imag": true, + "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": true, "google.generativeai.protos.ContentFilter.BlockedReason.numerator": true, "google.generativeai.protos.ContentFilter.BlockedReason.real": true, "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": true, @@ -4323,6 +4429,84 @@ "google.generativeai.protos.Document.to_json": true, "google.generativeai.protos.Document.update_time": true, "google.generativeai.protos.Document.wrap": true, + "google.generativeai.protos.DynamicRetrievalConfig": false, + "google.generativeai.protos.DynamicRetrievalConfig.Mode": false, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_DYNAMIC": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_UNSPECIFIED": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": true, + 
"google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": true, + 
"google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.from_bytes": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": true, + "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": true, + "google.generativeai.protos.DynamicRetrievalConfig.__call__": true, + 
"google.generativeai.protos.DynamicRetrievalConfig.__eq__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__ge__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__gt__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__init__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__le__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__lt__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__ne__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__new__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__or__": true, + "google.generativeai.protos.DynamicRetrievalConfig.__ror__": true, + "google.generativeai.protos.DynamicRetrievalConfig.copy_from": true, + "google.generativeai.protos.DynamicRetrievalConfig.deserialize": true, + "google.generativeai.protos.DynamicRetrievalConfig.dynamic_threshold": true, + "google.generativeai.protos.DynamicRetrievalConfig.from_json": true, + "google.generativeai.protos.DynamicRetrievalConfig.mode": true, + "google.generativeai.protos.DynamicRetrievalConfig.mro": true, + "google.generativeai.protos.DynamicRetrievalConfig.pb": true, + "google.generativeai.protos.DynamicRetrievalConfig.serialize": true, + "google.generativeai.protos.DynamicRetrievalConfig.to_dict": true, + "google.generativeai.protos.DynamicRetrievalConfig.to_json": true, + "google.generativeai.protos.DynamicRetrievalConfig.wrap": true, "google.generativeai.protos.EmbedContentRequest": false, "google.generativeai.protos.EmbedContentRequest.__call__": true, "google.generativeai.protos.EmbedContentRequest.__eq__": true, @@ -4513,6 +4697,7 @@ "google.generativeai.protos.ExecutableCode.Language.denominator": true, "google.generativeai.protos.ExecutableCode.Language.from_bytes": true, "google.generativeai.protos.ExecutableCode.Language.imag": true, + "google.generativeai.protos.ExecutableCode.Language.is_integer": true, "google.generativeai.protos.ExecutableCode.Language.numerator": 
true, "google.generativeai.protos.ExecutableCode.Language.real": true, "google.generativeai.protos.ExecutableCode.Language.to_bytes": true, @@ -4592,6 +4777,7 @@ "google.generativeai.protos.File.State.denominator": true, "google.generativeai.protos.File.State.from_bytes": true, "google.generativeai.protos.File.State.imag": true, + "google.generativeai.protos.File.State.is_integer": true, "google.generativeai.protos.File.State.numerator": true, "google.generativeai.protos.File.State.real": true, "google.generativeai.protos.File.State.to_bytes": true, @@ -4727,6 +4913,7 @@ "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": true, "google.generativeai.protos.FunctionCallingConfig.Mode.from_bytes": true, "google.generativeai.protos.FunctionCallingConfig.Mode.imag": true, + "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": true, "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": true, "google.generativeai.protos.FunctionCallingConfig.Mode.real": true, "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": true, @@ -4853,6 +5040,7 @@ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": true, "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.from_bytes": true, "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": true, + "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": true, "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": true, "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": true, "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": true, @@ -4937,6 +5125,7 @@ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": true, "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.from_bytes": true, "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": true, + 
"google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": true, "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": true, "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": true, "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": true, @@ -5017,8 +5206,10 @@ "google.generativeai.protos.GenerateContentResponse": false, "google.generativeai.protos.GenerateContentResponse.PromptFeedback": false, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": false, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCKLIST": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.OTHER": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.PROHIBITED_CONTENT": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.SAFETY": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": true, @@ -5068,6 +5259,7 @@ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.from_bytes": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": true, + "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": true, "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": true, 
"google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": true, @@ -5261,10 +5453,14 @@ "google.generativeai.protos.GenerationConfig.candidate_count": true, "google.generativeai.protos.GenerationConfig.copy_from": true, "google.generativeai.protos.GenerationConfig.deserialize": true, + "google.generativeai.protos.GenerationConfig.frequency_penalty": true, "google.generativeai.protos.GenerationConfig.from_json": true, + "google.generativeai.protos.GenerationConfig.logprobs": true, "google.generativeai.protos.GenerationConfig.max_output_tokens": true, "google.generativeai.protos.GenerationConfig.mro": true, "google.generativeai.protos.GenerationConfig.pb": true, + "google.generativeai.protos.GenerationConfig.presence_penalty": true, + "google.generativeai.protos.GenerationConfig.response_logprobs": true, "google.generativeai.protos.GenerationConfig.response_mime_type": true, "google.generativeai.protos.GenerationConfig.response_schema": true, "google.generativeai.protos.GenerationConfig.serialize": true, @@ -5451,6 +5647,28 @@ "google.generativeai.protos.GetTunedModelRequest.to_dict": true, "google.generativeai.protos.GetTunedModelRequest.to_json": true, "google.generativeai.protos.GetTunedModelRequest.wrap": true, + "google.generativeai.protos.GoogleSearchRetrieval": false, + "google.generativeai.protos.GoogleSearchRetrieval.__call__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__eq__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__ge__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__gt__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__init__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__le__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__lt__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__ne__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__new__": true, + "google.generativeai.protos.GoogleSearchRetrieval.__or__": 
true, + "google.generativeai.protos.GoogleSearchRetrieval.__ror__": true, + "google.generativeai.protos.GoogleSearchRetrieval.copy_from": true, + "google.generativeai.protos.GoogleSearchRetrieval.deserialize": true, + "google.generativeai.protos.GoogleSearchRetrieval.dynamic_retrieval_config": true, + "google.generativeai.protos.GoogleSearchRetrieval.from_json": true, + "google.generativeai.protos.GoogleSearchRetrieval.mro": true, + "google.generativeai.protos.GoogleSearchRetrieval.pb": true, + "google.generativeai.protos.GoogleSearchRetrieval.serialize": true, + "google.generativeai.protos.GoogleSearchRetrieval.to_dict": true, + "google.generativeai.protos.GoogleSearchRetrieval.to_json": true, + "google.generativeai.protos.GoogleSearchRetrieval.wrap": true, "google.generativeai.protos.GroundingAttribution": false, "google.generativeai.protos.GroundingAttribution.__call__": true, "google.generativeai.protos.GroundingAttribution.__eq__": true, @@ -5474,6 +5692,76 @@ "google.generativeai.protos.GroundingAttribution.to_dict": true, "google.generativeai.protos.GroundingAttribution.to_json": true, "google.generativeai.protos.GroundingAttribution.wrap": true, + "google.generativeai.protos.GroundingChunk": false, + "google.generativeai.protos.GroundingChunk.Web": false, + "google.generativeai.protos.GroundingChunk.Web.__call__": true, + "google.generativeai.protos.GroundingChunk.Web.__eq__": true, + "google.generativeai.protos.GroundingChunk.Web.__ge__": true, + "google.generativeai.protos.GroundingChunk.Web.__gt__": true, + "google.generativeai.protos.GroundingChunk.Web.__init__": true, + "google.generativeai.protos.GroundingChunk.Web.__le__": true, + "google.generativeai.protos.GroundingChunk.Web.__lt__": true, + "google.generativeai.protos.GroundingChunk.Web.__ne__": true, + "google.generativeai.protos.GroundingChunk.Web.__new__": true, + "google.generativeai.protos.GroundingChunk.Web.__or__": true, + "google.generativeai.protos.GroundingChunk.Web.__ror__": true, + 
"google.generativeai.protos.GroundingChunk.Web.copy_from": true, + "google.generativeai.protos.GroundingChunk.Web.deserialize": true, + "google.generativeai.protos.GroundingChunk.Web.from_json": true, + "google.generativeai.protos.GroundingChunk.Web.mro": true, + "google.generativeai.protos.GroundingChunk.Web.pb": true, + "google.generativeai.protos.GroundingChunk.Web.serialize": true, + "google.generativeai.protos.GroundingChunk.Web.title": true, + "google.generativeai.protos.GroundingChunk.Web.to_dict": true, + "google.generativeai.protos.GroundingChunk.Web.to_json": true, + "google.generativeai.protos.GroundingChunk.Web.uri": true, + "google.generativeai.protos.GroundingChunk.Web.wrap": true, + "google.generativeai.protos.GroundingChunk.__call__": true, + "google.generativeai.protos.GroundingChunk.__eq__": true, + "google.generativeai.protos.GroundingChunk.__ge__": true, + "google.generativeai.protos.GroundingChunk.__gt__": true, + "google.generativeai.protos.GroundingChunk.__init__": true, + "google.generativeai.protos.GroundingChunk.__le__": true, + "google.generativeai.protos.GroundingChunk.__lt__": true, + "google.generativeai.protos.GroundingChunk.__ne__": true, + "google.generativeai.protos.GroundingChunk.__new__": true, + "google.generativeai.protos.GroundingChunk.__or__": true, + "google.generativeai.protos.GroundingChunk.__ror__": true, + "google.generativeai.protos.GroundingChunk.copy_from": true, + "google.generativeai.protos.GroundingChunk.deserialize": true, + "google.generativeai.protos.GroundingChunk.from_json": true, + "google.generativeai.protos.GroundingChunk.mro": true, + "google.generativeai.protos.GroundingChunk.pb": true, + "google.generativeai.protos.GroundingChunk.serialize": true, + "google.generativeai.protos.GroundingChunk.to_dict": true, + "google.generativeai.protos.GroundingChunk.to_json": true, + "google.generativeai.protos.GroundingChunk.web": true, + "google.generativeai.protos.GroundingChunk.wrap": true, + 
"google.generativeai.protos.GroundingMetadata": false, + "google.generativeai.protos.GroundingMetadata.__call__": true, + "google.generativeai.protos.GroundingMetadata.__eq__": true, + "google.generativeai.protos.GroundingMetadata.__ge__": true, + "google.generativeai.protos.GroundingMetadata.__gt__": true, + "google.generativeai.protos.GroundingMetadata.__init__": true, + "google.generativeai.protos.GroundingMetadata.__le__": true, + "google.generativeai.protos.GroundingMetadata.__lt__": true, + "google.generativeai.protos.GroundingMetadata.__ne__": true, + "google.generativeai.protos.GroundingMetadata.__new__": true, + "google.generativeai.protos.GroundingMetadata.__or__": true, + "google.generativeai.protos.GroundingMetadata.__ror__": true, + "google.generativeai.protos.GroundingMetadata.copy_from": true, + "google.generativeai.protos.GroundingMetadata.deserialize": true, + "google.generativeai.protos.GroundingMetadata.from_json": true, + "google.generativeai.protos.GroundingMetadata.grounding_chunks": true, + "google.generativeai.protos.GroundingMetadata.grounding_supports": true, + "google.generativeai.protos.GroundingMetadata.mro": true, + "google.generativeai.protos.GroundingMetadata.pb": true, + "google.generativeai.protos.GroundingMetadata.retrieval_metadata": true, + "google.generativeai.protos.GroundingMetadata.search_entry_point": true, + "google.generativeai.protos.GroundingMetadata.serialize": true, + "google.generativeai.protos.GroundingMetadata.to_dict": true, + "google.generativeai.protos.GroundingMetadata.to_json": true, + "google.generativeai.protos.GroundingMetadata.wrap": true, "google.generativeai.protos.GroundingPassage": false, "google.generativeai.protos.GroundingPassage.__call__": true, "google.generativeai.protos.GroundingPassage.__eq__": true, @@ -5519,7 +5807,32 @@ "google.generativeai.protos.GroundingPassages.to_dict": true, "google.generativeai.protos.GroundingPassages.to_json": true, 
"google.generativeai.protos.GroundingPassages.wrap": true, + "google.generativeai.protos.GroundingSupport": false, + "google.generativeai.protos.GroundingSupport.__call__": true, + "google.generativeai.protos.GroundingSupport.__eq__": true, + "google.generativeai.protos.GroundingSupport.__ge__": true, + "google.generativeai.protos.GroundingSupport.__gt__": true, + "google.generativeai.protos.GroundingSupport.__init__": true, + "google.generativeai.protos.GroundingSupport.__le__": true, + "google.generativeai.protos.GroundingSupport.__lt__": true, + "google.generativeai.protos.GroundingSupport.__ne__": true, + "google.generativeai.protos.GroundingSupport.__new__": true, + "google.generativeai.protos.GroundingSupport.__or__": true, + "google.generativeai.protos.GroundingSupport.__ror__": true, + "google.generativeai.protos.GroundingSupport.confidence_scores": true, + "google.generativeai.protos.GroundingSupport.copy_from": true, + "google.generativeai.protos.GroundingSupport.deserialize": true, + "google.generativeai.protos.GroundingSupport.from_json": true, + "google.generativeai.protos.GroundingSupport.grounding_chunk_indices": true, + "google.generativeai.protos.GroundingSupport.mro": true, + "google.generativeai.protos.GroundingSupport.pb": true, + "google.generativeai.protos.GroundingSupport.segment": true, + "google.generativeai.protos.GroundingSupport.serialize": true, + "google.generativeai.protos.GroundingSupport.to_dict": true, + "google.generativeai.protos.GroundingSupport.to_json": true, + "google.generativeai.protos.GroundingSupport.wrap": true, "google.generativeai.protos.HarmCategory": false, + "google.generativeai.protos.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY": true, "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS": true, "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true, "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DEROGATORY": true, @@ -5579,6 +5892,7 @@ 
"google.generativeai.protos.HarmCategory.denominator": true, "google.generativeai.protos.HarmCategory.from_bytes": true, "google.generativeai.protos.HarmCategory.imag": true, + "google.generativeai.protos.HarmCategory.is_integer": true, "google.generativeai.protos.HarmCategory.numerator": true, "google.generativeai.protos.HarmCategory.real": true, "google.generativeai.protos.HarmCategory.to_bytes": true, @@ -5979,6 +6293,75 @@ "google.generativeai.protos.ListTunedModelsResponse.to_json": true, "google.generativeai.protos.ListTunedModelsResponse.tuned_models": true, "google.generativeai.protos.ListTunedModelsResponse.wrap": true, + "google.generativeai.protos.LogprobsResult": false, + "google.generativeai.protos.LogprobsResult.Candidate": false, + "google.generativeai.protos.LogprobsResult.Candidate.__call__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__eq__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__ge__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__gt__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__init__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__le__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__lt__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__ne__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__new__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__or__": true, + "google.generativeai.protos.LogprobsResult.Candidate.__ror__": true, + "google.generativeai.protos.LogprobsResult.Candidate.copy_from": true, + "google.generativeai.protos.LogprobsResult.Candidate.deserialize": true, + "google.generativeai.protos.LogprobsResult.Candidate.from_json": true, + "google.generativeai.protos.LogprobsResult.Candidate.log_probability": true, + "google.generativeai.protos.LogprobsResult.Candidate.mro": true, + "google.generativeai.protos.LogprobsResult.Candidate.pb": true, + 
"google.generativeai.protos.LogprobsResult.Candidate.serialize": true, + "google.generativeai.protos.LogprobsResult.Candidate.to_dict": true, + "google.generativeai.protos.LogprobsResult.Candidate.to_json": true, + "google.generativeai.protos.LogprobsResult.Candidate.token": true, + "google.generativeai.protos.LogprobsResult.Candidate.token_id": true, + "google.generativeai.protos.LogprobsResult.Candidate.wrap": true, + "google.generativeai.protos.LogprobsResult.TopCandidates": false, + "google.generativeai.protos.LogprobsResult.TopCandidates.__call__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__or__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.__ror__": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.candidates": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.mro": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.pb": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": true, + "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": true, + 
"google.generativeai.protos.LogprobsResult.TopCandidates.wrap": true, + "google.generativeai.protos.LogprobsResult.__call__": true, + "google.generativeai.protos.LogprobsResult.__eq__": true, + "google.generativeai.protos.LogprobsResult.__ge__": true, + "google.generativeai.protos.LogprobsResult.__gt__": true, + "google.generativeai.protos.LogprobsResult.__init__": true, + "google.generativeai.protos.LogprobsResult.__le__": true, + "google.generativeai.protos.LogprobsResult.__lt__": true, + "google.generativeai.protos.LogprobsResult.__ne__": true, + "google.generativeai.protos.LogprobsResult.__new__": true, + "google.generativeai.protos.LogprobsResult.__or__": true, + "google.generativeai.protos.LogprobsResult.__ror__": true, + "google.generativeai.protos.LogprobsResult.chosen_candidates": true, + "google.generativeai.protos.LogprobsResult.copy_from": true, + "google.generativeai.protos.LogprobsResult.deserialize": true, + "google.generativeai.protos.LogprobsResult.from_json": true, + "google.generativeai.protos.LogprobsResult.mro": true, + "google.generativeai.protos.LogprobsResult.pb": true, + "google.generativeai.protos.LogprobsResult.serialize": true, + "google.generativeai.protos.LogprobsResult.to_dict": true, + "google.generativeai.protos.LogprobsResult.to_json": true, + "google.generativeai.protos.LogprobsResult.top_candidates": true, + "google.generativeai.protos.LogprobsResult.wrap": true, "google.generativeai.protos.Message": false, "google.generativeai.protos.Message.__call__": true, "google.generativeai.protos.Message.__eq__": true, @@ -6165,6 +6548,7 @@ "google.generativeai.protos.Permission.GranteeType.denominator": true, "google.generativeai.protos.Permission.GranteeType.from_bytes": true, "google.generativeai.protos.Permission.GranteeType.imag": true, + "google.generativeai.protos.Permission.GranteeType.is_integer": true, "google.generativeai.protos.Permission.GranteeType.numerator": true, "google.generativeai.protos.Permission.GranteeType.real": 
true, "google.generativeai.protos.Permission.GranteeType.to_bytes": true, @@ -6221,6 +6605,7 @@ "google.generativeai.protos.Permission.Role.denominator": true, "google.generativeai.protos.Permission.Role.from_bytes": true, "google.generativeai.protos.Permission.Role.imag": true, + "google.generativeai.protos.Permission.Role.is_integer": true, "google.generativeai.protos.Permission.Role.numerator": true, "google.generativeai.protos.Permission.Role.real": true, "google.generativeai.protos.Permission.Role.to_bytes": true, @@ -6248,6 +6633,52 @@ "google.generativeai.protos.Permission.to_dict": true, "google.generativeai.protos.Permission.to_json": true, "google.generativeai.protos.Permission.wrap": true, + "google.generativeai.protos.PredictRequest": false, + "google.generativeai.protos.PredictRequest.__call__": true, + "google.generativeai.protos.PredictRequest.__eq__": true, + "google.generativeai.protos.PredictRequest.__ge__": true, + "google.generativeai.protos.PredictRequest.__gt__": true, + "google.generativeai.protos.PredictRequest.__init__": true, + "google.generativeai.protos.PredictRequest.__le__": true, + "google.generativeai.protos.PredictRequest.__lt__": true, + "google.generativeai.protos.PredictRequest.__ne__": true, + "google.generativeai.protos.PredictRequest.__new__": true, + "google.generativeai.protos.PredictRequest.__or__": true, + "google.generativeai.protos.PredictRequest.__ror__": true, + "google.generativeai.protos.PredictRequest.copy_from": true, + "google.generativeai.protos.PredictRequest.deserialize": true, + "google.generativeai.protos.PredictRequest.from_json": true, + "google.generativeai.protos.PredictRequest.instances": true, + "google.generativeai.protos.PredictRequest.model": true, + "google.generativeai.protos.PredictRequest.mro": true, + "google.generativeai.protos.PredictRequest.parameters": true, + "google.generativeai.protos.PredictRequest.pb": true, + "google.generativeai.protos.PredictRequest.serialize": true, + 
"google.generativeai.protos.PredictRequest.to_dict": true, + "google.generativeai.protos.PredictRequest.to_json": true, + "google.generativeai.protos.PredictRequest.wrap": true, + "google.generativeai.protos.PredictResponse": false, + "google.generativeai.protos.PredictResponse.__call__": true, + "google.generativeai.protos.PredictResponse.__eq__": true, + "google.generativeai.protos.PredictResponse.__ge__": true, + "google.generativeai.protos.PredictResponse.__gt__": true, + "google.generativeai.protos.PredictResponse.__init__": true, + "google.generativeai.protos.PredictResponse.__le__": true, + "google.generativeai.protos.PredictResponse.__lt__": true, + "google.generativeai.protos.PredictResponse.__ne__": true, + "google.generativeai.protos.PredictResponse.__new__": true, + "google.generativeai.protos.PredictResponse.__or__": true, + "google.generativeai.protos.PredictResponse.__ror__": true, + "google.generativeai.protos.PredictResponse.copy_from": true, + "google.generativeai.protos.PredictResponse.deserialize": true, + "google.generativeai.protos.PredictResponse.from_json": true, + "google.generativeai.protos.PredictResponse.mro": true, + "google.generativeai.protos.PredictResponse.pb": true, + "google.generativeai.protos.PredictResponse.predictions": true, + "google.generativeai.protos.PredictResponse.serialize": true, + "google.generativeai.protos.PredictResponse.to_dict": true, + "google.generativeai.protos.PredictResponse.to_json": true, + "google.generativeai.protos.PredictResponse.wrap": true, "google.generativeai.protos.QueryCorpusRequest": false, "google.generativeai.protos.QueryCorpusRequest.__call__": true, "google.generativeai.protos.QueryCorpusRequest.__eq__": true, @@ -6365,6 +6796,28 @@ "google.generativeai.protos.RelevantChunk.to_dict": true, "google.generativeai.protos.RelevantChunk.to_json": true, "google.generativeai.protos.RelevantChunk.wrap": true, + "google.generativeai.protos.RetrievalMetadata": false, + 
"google.generativeai.protos.RetrievalMetadata.__call__": true, + "google.generativeai.protos.RetrievalMetadata.__eq__": true, + "google.generativeai.protos.RetrievalMetadata.__ge__": true, + "google.generativeai.protos.RetrievalMetadata.__gt__": true, + "google.generativeai.protos.RetrievalMetadata.__init__": true, + "google.generativeai.protos.RetrievalMetadata.__le__": true, + "google.generativeai.protos.RetrievalMetadata.__lt__": true, + "google.generativeai.protos.RetrievalMetadata.__ne__": true, + "google.generativeai.protos.RetrievalMetadata.__new__": true, + "google.generativeai.protos.RetrievalMetadata.__or__": true, + "google.generativeai.protos.RetrievalMetadata.__ror__": true, + "google.generativeai.protos.RetrievalMetadata.copy_from": true, + "google.generativeai.protos.RetrievalMetadata.deserialize": true, + "google.generativeai.protos.RetrievalMetadata.from_json": true, + "google.generativeai.protos.RetrievalMetadata.google_search_dynamic_retrieval_score": true, + "google.generativeai.protos.RetrievalMetadata.mro": true, + "google.generativeai.protos.RetrievalMetadata.pb": true, + "google.generativeai.protos.RetrievalMetadata.serialize": true, + "google.generativeai.protos.RetrievalMetadata.to_dict": true, + "google.generativeai.protos.RetrievalMetadata.to_json": true, + "google.generativeai.protos.RetrievalMetadata.wrap": true, "google.generativeai.protos.SafetyFeedback": false, "google.generativeai.protos.SafetyFeedback.__call__": true, "google.generativeai.protos.SafetyFeedback.__eq__": true, @@ -6443,6 +6896,7 @@ "google.generativeai.protos.SafetyRating.HarmProbability.denominator": true, "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes": true, "google.generativeai.protos.SafetyRating.HarmProbability.imag": true, + "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": true, "google.generativeai.protos.SafetyRating.HarmProbability.numerator": true, "google.generativeai.protos.SafetyRating.HarmProbability.real": 
true, "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": true, @@ -6476,6 +6930,7 @@ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_NONE": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.OFF": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": true, @@ -6524,6 +6979,7 @@ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": true, + "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": true, "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": true, @@ -6591,6 +7047,8 @@ "google.generativeai.protos.Schema.format_": true, "google.generativeai.protos.Schema.from_json": true, "google.generativeai.protos.Schema.items": true, + "google.generativeai.protos.Schema.max_items": true, + "google.generativeai.protos.Schema.min_items": true, "google.generativeai.protos.Schema.mro": true, "google.generativeai.protos.Schema.nullable": true, "google.generativeai.protos.Schema.pb": true, @@ -6601,6 +7059,54 @@ "google.generativeai.protos.Schema.to_json": true, "google.generativeai.protos.Schema.type_": true, "google.generativeai.protos.Schema.wrap": true, + "google.generativeai.protos.SearchEntryPoint": false, + "google.generativeai.protos.SearchEntryPoint.__call__": true, + 
"google.generativeai.protos.SearchEntryPoint.__eq__": true, + "google.generativeai.protos.SearchEntryPoint.__ge__": true, + "google.generativeai.protos.SearchEntryPoint.__gt__": true, + "google.generativeai.protos.SearchEntryPoint.__init__": true, + "google.generativeai.protos.SearchEntryPoint.__le__": true, + "google.generativeai.protos.SearchEntryPoint.__lt__": true, + "google.generativeai.protos.SearchEntryPoint.__ne__": true, + "google.generativeai.protos.SearchEntryPoint.__new__": true, + "google.generativeai.protos.SearchEntryPoint.__or__": true, + "google.generativeai.protos.SearchEntryPoint.__ror__": true, + "google.generativeai.protos.SearchEntryPoint.copy_from": true, + "google.generativeai.protos.SearchEntryPoint.deserialize": true, + "google.generativeai.protos.SearchEntryPoint.from_json": true, + "google.generativeai.protos.SearchEntryPoint.mro": true, + "google.generativeai.protos.SearchEntryPoint.pb": true, + "google.generativeai.protos.SearchEntryPoint.rendered_content": true, + "google.generativeai.protos.SearchEntryPoint.sdk_blob": true, + "google.generativeai.protos.SearchEntryPoint.serialize": true, + "google.generativeai.protos.SearchEntryPoint.to_dict": true, + "google.generativeai.protos.SearchEntryPoint.to_json": true, + "google.generativeai.protos.SearchEntryPoint.wrap": true, + "google.generativeai.protos.Segment": false, + "google.generativeai.protos.Segment.__call__": true, + "google.generativeai.protos.Segment.__eq__": true, + "google.generativeai.protos.Segment.__ge__": true, + "google.generativeai.protos.Segment.__gt__": true, + "google.generativeai.protos.Segment.__init__": true, + "google.generativeai.protos.Segment.__le__": true, + "google.generativeai.protos.Segment.__lt__": true, + "google.generativeai.protos.Segment.__ne__": true, + "google.generativeai.protos.Segment.__new__": true, + "google.generativeai.protos.Segment.__or__": true, + "google.generativeai.protos.Segment.__ror__": true, + 
"google.generativeai.protos.Segment.copy_from": true, + "google.generativeai.protos.Segment.deserialize": true, + "google.generativeai.protos.Segment.end_index": true, + "google.generativeai.protos.Segment.from_json": true, + "google.generativeai.protos.Segment.mro": true, + "google.generativeai.protos.Segment.part_index": true, + "google.generativeai.protos.Segment.pb": true, + "google.generativeai.protos.Segment.serialize": true, + "google.generativeai.protos.Segment.start_index": true, + "google.generativeai.protos.Segment.text": true, + "google.generativeai.protos.Segment.to_dict": true, + "google.generativeai.protos.Segment.to_json": true, + "google.generativeai.protos.Segment.wrap": true, "google.generativeai.protos.SemanticRetrieverConfig": false, "google.generativeai.protos.SemanticRetrieverConfig.__call__": true, "google.generativeai.protos.SemanticRetrieverConfig.__eq__": true, @@ -6706,6 +7212,7 @@ "google.generativeai.protos.TaskType.denominator": true, "google.generativeai.protos.TaskType.from_bytes": true, "google.generativeai.protos.TaskType.imag": true, + "google.generativeai.protos.TaskType.is_integer": true, "google.generativeai.protos.TaskType.numerator": true, "google.generativeai.protos.TaskType.real": true, "google.generativeai.protos.TaskType.to_bytes": true, @@ -6772,6 +7279,7 @@ "google.generativeai.protos.Tool.deserialize": true, "google.generativeai.protos.Tool.from_json": true, "google.generativeai.protos.Tool.function_declarations": true, + "google.generativeai.protos.Tool.google_search_retrieval": true, "google.generativeai.protos.Tool.mro": true, "google.generativeai.protos.Tool.pb": true, "google.generativeai.protos.Tool.serialize": true, @@ -6898,6 +7406,7 @@ "google.generativeai.protos.TunedModel.State.denominator": true, "google.generativeai.protos.TunedModel.State.from_bytes": true, "google.generativeai.protos.TunedModel.State.imag": true, + "google.generativeai.protos.TunedModel.State.is_integer": true, 
"google.generativeai.protos.TunedModel.State.numerator": true, "google.generativeai.protos.TunedModel.State.real": true, "google.generativeai.protos.TunedModel.State.to_bytes": true, @@ -6922,6 +7431,7 @@ "google.generativeai.protos.TunedModel.mro": true, "google.generativeai.protos.TunedModel.name": true, "google.generativeai.protos.TunedModel.pb": true, + "google.generativeai.protos.TunedModel.reader_project_numbers": true, "google.generativeai.protos.TunedModel.serialize": true, "google.generativeai.protos.TunedModel.state": true, "google.generativeai.protos.TunedModel.temperature": true, @@ -7108,6 +7618,7 @@ "google.generativeai.protos.Type.denominator": true, "google.generativeai.protos.Type.from_bytes": true, "google.generativeai.protos.Type.imag": true, + "google.generativeai.protos.Type.is_integer": true, "google.generativeai.protos.Type.numerator": true, "google.generativeai.protos.Type.real": true, "google.generativeai.protos.Type.to_bytes": true, @@ -7291,18 +7802,6 @@ "google.generativeai.types.AsyncGenerateContentResponse.text": true, "google.generativeai.types.AsyncGenerateContentResponse.to_dict": true, "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata": true, - "google.generativeai.types.AuthorError": false, - "google.generativeai.types.AuthorError.__eq__": true, - "google.generativeai.types.AuthorError.__ge__": true, - "google.generativeai.types.AuthorError.__gt__": true, - "google.generativeai.types.AuthorError.__init__": true, - "google.generativeai.types.AuthorError.__le__": true, - "google.generativeai.types.AuthorError.__lt__": true, - "google.generativeai.types.AuthorError.__ne__": true, - "google.generativeai.types.AuthorError.__new__": true, - "google.generativeai.types.AuthorError.add_note": true, - "google.generativeai.types.AuthorError.args": true, - "google.generativeai.types.AuthorError.with_traceback": true, "google.generativeai.types.BaseModelNameOptions": false, "google.generativeai.types.BlobDict": false, 
"google.generativeai.types.BlobDict.__contains__": true, @@ -7395,6 +7894,7 @@ "google.generativeai.types.BlockedReason.denominator": true, "google.generativeai.types.BlockedReason.from_bytes": true, "google.generativeai.types.BlockedReason.imag": true, + "google.generativeai.types.BlockedReason.is_integer": true, "google.generativeai.types.BlockedReason.numerator": true, "google.generativeai.types.BlockedReason.real": true, "google.generativeai.types.BlockedReason.to_bytes": true, @@ -7426,20 +7926,6 @@ "google.generativeai.types.CallableFunctionDeclaration.name": true, "google.generativeai.types.CallableFunctionDeclaration.parameters": true, "google.generativeai.types.CallableFunctionDeclaration.to_proto": true, - "google.generativeai.types.ChatResponse": false, - "google.generativeai.types.ChatResponse.__eq__": true, - "google.generativeai.types.ChatResponse.__ge__": true, - "google.generativeai.types.ChatResponse.__gt__": true, - "google.generativeai.types.ChatResponse.__init__": true, - "google.generativeai.types.ChatResponse.__le__": true, - "google.generativeai.types.ChatResponse.__lt__": true, - "google.generativeai.types.ChatResponse.__ne__": true, - "google.generativeai.types.ChatResponse.__new__": true, - "google.generativeai.types.ChatResponse.last": true, - "google.generativeai.types.ChatResponse.reply": true, - "google.generativeai.types.ChatResponse.to_dict": true, - "google.generativeai.types.ChatResponse.top_k": true, - "google.generativeai.types.ChatResponse.top_p": true, "google.generativeai.types.CitationMetadataDict": false, "google.generativeai.types.CitationMetadataDict.__contains__": true, "google.generativeai.types.CitationMetadataDict.__eq__": true, @@ -7492,16 +7978,6 @@ "google.generativeai.types.CitationSourceDict.setdefault": true, "google.generativeai.types.CitationSourceDict.update": true, "google.generativeai.types.CitationSourceDict.values": true, - "google.generativeai.types.Completion": false, - 
"google.generativeai.types.Completion.__eq__": true, - "google.generativeai.types.Completion.__ge__": true, - "google.generativeai.types.Completion.__gt__": true, - "google.generativeai.types.Completion.__init__": true, - "google.generativeai.types.Completion.__le__": true, - "google.generativeai.types.Completion.__lt__": true, - "google.generativeai.types.Completion.__ne__": true, - "google.generativeai.types.Completion.__new__": true, - "google.generativeai.types.Completion.to_dict": true, "google.generativeai.types.ContentDict": false, "google.generativeai.types.ContentDict.__contains__": true, "google.generativeai.types.ContentDict.__eq__": true, @@ -7556,34 +8032,6 @@ "google.generativeai.types.ContentFilterDict.values": true, "google.generativeai.types.ContentType": false, "google.generativeai.types.ContentsType": false, - "google.generativeai.types.ExampleDict": false, - "google.generativeai.types.ExampleDict.__contains__": true, - "google.generativeai.types.ExampleDict.__eq__": true, - "google.generativeai.types.ExampleDict.__ge__": true, - "google.generativeai.types.ExampleDict.__getitem__": true, - "google.generativeai.types.ExampleDict.__gt__": true, - "google.generativeai.types.ExampleDict.__init__": true, - "google.generativeai.types.ExampleDict.__iter__": true, - "google.generativeai.types.ExampleDict.__le__": true, - "google.generativeai.types.ExampleDict.__len__": true, - "google.generativeai.types.ExampleDict.__lt__": true, - "google.generativeai.types.ExampleDict.__ne__": true, - "google.generativeai.types.ExampleDict.__new__": true, - "google.generativeai.types.ExampleDict.__or__": true, - "google.generativeai.types.ExampleDict.__ror__": true, - "google.generativeai.types.ExampleDict.clear": true, - "google.generativeai.types.ExampleDict.copy": true, - "google.generativeai.types.ExampleDict.fromkeys": true, - "google.generativeai.types.ExampleDict.get": true, - "google.generativeai.types.ExampleDict.items": true, - 
"google.generativeai.types.ExampleDict.keys": true, - "google.generativeai.types.ExampleDict.pop": true, - "google.generativeai.types.ExampleDict.popitem": true, - "google.generativeai.types.ExampleDict.setdefault": true, - "google.generativeai.types.ExampleDict.update": true, - "google.generativeai.types.ExampleDict.values": true, - "google.generativeai.types.ExampleOptions": false, - "google.generativeai.types.ExamplesOptions": false, "google.generativeai.types.File": false, "google.generativeai.types.File.__eq__": true, "google.generativeai.types.File.__ge__": true, @@ -7693,9 +8141,14 @@ "google.generativeai.types.GenerationConfig.__ne__": true, "google.generativeai.types.GenerationConfig.__new__": true, "google.generativeai.types.GenerationConfig.candidate_count": true, + "google.generativeai.types.GenerationConfig.frequency_penalty": true, + "google.generativeai.types.GenerationConfig.logprobs": true, "google.generativeai.types.GenerationConfig.max_output_tokens": true, + "google.generativeai.types.GenerationConfig.presence_penalty": true, + "google.generativeai.types.GenerationConfig.response_logprobs": true, "google.generativeai.types.GenerationConfig.response_mime_type": true, "google.generativeai.types.GenerationConfig.response_schema": true, + "google.generativeai.types.GenerationConfig.seed": true, "google.generativeai.types.GenerationConfig.stop_sequences": true, "google.generativeai.types.GenerationConfig.temperature": true, "google.generativeai.types.GenerationConfig.top_k": true, @@ -7733,6 +8186,7 @@ "google.generativeai.types.HarmBlockThreshold.BLOCK_NONE": true, "google.generativeai.types.HarmBlockThreshold.BLOCK_ONLY_HIGH": true, "google.generativeai.types.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true, + "google.generativeai.types.HarmBlockThreshold.OFF": true, "google.generativeai.types.HarmBlockThreshold.__abs__": true, "google.generativeai.types.HarmBlockThreshold.__add__": true, 
"google.generativeai.types.HarmBlockThreshold.__and__": true, @@ -7781,6 +8235,7 @@ "google.generativeai.types.HarmBlockThreshold.denominator": true, "google.generativeai.types.HarmBlockThreshold.from_bytes": true, "google.generativeai.types.HarmBlockThreshold.imag": true, + "google.generativeai.types.HarmBlockThreshold.is_integer": true, "google.generativeai.types.HarmBlockThreshold.numerator": true, "google.generativeai.types.HarmBlockThreshold.real": true, "google.generativeai.types.HarmBlockThreshold.to_bytes": true, @@ -7838,6 +8293,7 @@ "google.generativeai.types.HarmCategory.denominator": true, "google.generativeai.types.HarmCategory.from_bytes": true, "google.generativeai.types.HarmCategory.imag": true, + "google.generativeai.types.HarmCategory.is_integer": true, "google.generativeai.types.HarmCategory.numerator": true, "google.generativeai.types.HarmCategory.real": true, "google.generativeai.types.HarmCategory.to_bytes": true, @@ -7895,6 +8351,7 @@ "google.generativeai.types.HarmProbability.denominator": true, "google.generativeai.types.HarmProbability.from_bytes": true, "google.generativeai.types.HarmProbability.imag": true, + "google.generativeai.types.HarmProbability.is_integer": true, "google.generativeai.types.HarmProbability.numerator": true, "google.generativeai.types.HarmProbability.real": true, "google.generativeai.types.HarmProbability.to_bytes": true, @@ -7910,61 +8367,6 @@ "google.generativeai.types.IncompleteIterationError.add_note": true, "google.generativeai.types.IncompleteIterationError.args": true, "google.generativeai.types.IncompleteIterationError.with_traceback": true, - "google.generativeai.types.MessageDict": false, - "google.generativeai.types.MessageDict.__contains__": true, - "google.generativeai.types.MessageDict.__eq__": true, - "google.generativeai.types.MessageDict.__ge__": true, - "google.generativeai.types.MessageDict.__getitem__": true, - "google.generativeai.types.MessageDict.__gt__": true, - 
"google.generativeai.types.MessageDict.__init__": true, - "google.generativeai.types.MessageDict.__iter__": true, - "google.generativeai.types.MessageDict.__le__": true, - "google.generativeai.types.MessageDict.__len__": true, - "google.generativeai.types.MessageDict.__lt__": true, - "google.generativeai.types.MessageDict.__ne__": true, - "google.generativeai.types.MessageDict.__new__": true, - "google.generativeai.types.MessageDict.__or__": true, - "google.generativeai.types.MessageDict.__ror__": true, - "google.generativeai.types.MessageDict.clear": true, - "google.generativeai.types.MessageDict.copy": true, - "google.generativeai.types.MessageDict.fromkeys": true, - "google.generativeai.types.MessageDict.get": true, - "google.generativeai.types.MessageDict.items": true, - "google.generativeai.types.MessageDict.keys": true, - "google.generativeai.types.MessageDict.pop": true, - "google.generativeai.types.MessageDict.popitem": true, - "google.generativeai.types.MessageDict.setdefault": true, - "google.generativeai.types.MessageDict.update": true, - "google.generativeai.types.MessageDict.values": true, - "google.generativeai.types.MessageOptions": false, - "google.generativeai.types.MessagePromptDict": false, - "google.generativeai.types.MessagePromptDict.__contains__": true, - "google.generativeai.types.MessagePromptDict.__eq__": true, - "google.generativeai.types.MessagePromptDict.__ge__": true, - "google.generativeai.types.MessagePromptDict.__getitem__": true, - "google.generativeai.types.MessagePromptDict.__gt__": true, - "google.generativeai.types.MessagePromptDict.__init__": true, - "google.generativeai.types.MessagePromptDict.__iter__": true, - "google.generativeai.types.MessagePromptDict.__le__": true, - "google.generativeai.types.MessagePromptDict.__len__": true, - "google.generativeai.types.MessagePromptDict.__lt__": true, - "google.generativeai.types.MessagePromptDict.__ne__": true, - "google.generativeai.types.MessagePromptDict.__new__": true, - 
"google.generativeai.types.MessagePromptDict.__or__": true, - "google.generativeai.types.MessagePromptDict.__ror__": true, - "google.generativeai.types.MessagePromptDict.clear": true, - "google.generativeai.types.MessagePromptDict.copy": true, - "google.generativeai.types.MessagePromptDict.fromkeys": true, - "google.generativeai.types.MessagePromptDict.get": true, - "google.generativeai.types.MessagePromptDict.items": true, - "google.generativeai.types.MessagePromptDict.keys": true, - "google.generativeai.types.MessagePromptDict.pop": true, - "google.generativeai.types.MessagePromptDict.popitem": true, - "google.generativeai.types.MessagePromptDict.setdefault": true, - "google.generativeai.types.MessagePromptDict.update": true, - "google.generativeai.types.MessagePromptDict.values": true, - "google.generativeai.types.MessagePromptOptions": false, - "google.generativeai.types.MessagesOptions": false, "google.generativeai.types.Model": false, "google.generativeai.types.Model.__eq__": true, "google.generativeai.types.Model.__ge__": true, @@ -8061,32 +8463,6 @@ "google.generativeai.types.RequestOptions.keys": true, "google.generativeai.types.RequestOptions.values": true, "google.generativeai.types.RequestOptionsType": false, - "google.generativeai.types.ResponseDict": false, - "google.generativeai.types.ResponseDict.__contains__": true, - "google.generativeai.types.ResponseDict.__eq__": true, - "google.generativeai.types.ResponseDict.__ge__": true, - "google.generativeai.types.ResponseDict.__getitem__": true, - "google.generativeai.types.ResponseDict.__gt__": true, - "google.generativeai.types.ResponseDict.__init__": true, - "google.generativeai.types.ResponseDict.__iter__": true, - "google.generativeai.types.ResponseDict.__le__": true, - "google.generativeai.types.ResponseDict.__len__": true, - "google.generativeai.types.ResponseDict.__lt__": true, - "google.generativeai.types.ResponseDict.__ne__": true, - "google.generativeai.types.ResponseDict.__new__": true, - 
"google.generativeai.types.ResponseDict.__or__": true, - "google.generativeai.types.ResponseDict.__ror__": true, - "google.generativeai.types.ResponseDict.clear": true, - "google.generativeai.types.ResponseDict.copy": true, - "google.generativeai.types.ResponseDict.fromkeys": true, - "google.generativeai.types.ResponseDict.get": true, - "google.generativeai.types.ResponseDict.items": true, - "google.generativeai.types.ResponseDict.keys": true, - "google.generativeai.types.ResponseDict.pop": true, - "google.generativeai.types.ResponseDict.popitem": true, - "google.generativeai.types.ResponseDict.setdefault": true, - "google.generativeai.types.ResponseDict.update": true, - "google.generativeai.types.ResponseDict.values": true, "google.generativeai.types.SafetyFeedbackDict": false, "google.generativeai.types.SafetyFeedbackDict.__contains__": true, "google.generativeai.types.SafetyFeedbackDict.__eq__": true, @@ -8226,6 +8602,7 @@ "google.generativeai.types.Tool.__new__": true, "google.generativeai.types.Tool.code_execution": true, "google.generativeai.types.Tool.function_declarations": true, + "google.generativeai.types.Tool.google_search_retrieval": true, "google.generativeai.types.Tool.to_proto": true, "google.generativeai.types.ToolDict": false, "google.generativeai.types.ToolDict.__contains__": true, @@ -8269,6 +8646,7 @@ "google.generativeai.types.TunedModel.display_name": true, "google.generativeai.types.TunedModel.name": true, "google.generativeai.types.TunedModel.permissions": true, + "google.generativeai.types.TunedModel.reader_project_numbers": true, "google.generativeai.types.TunedModel.source_model": true, "google.generativeai.types.TunedModel.state": true, "google.generativeai.types.TunedModel.temperature": true, @@ -8330,6 +8708,7 @@ "google.generativeai.types.TunedModelState.denominator": true, "google.generativeai.types.TunedModelState.from_bytes": true, "google.generativeai.types.TunedModelState.imag": true, + 
"google.generativeai.types.TunedModelState.is_integer": true, "google.generativeai.types.TunedModelState.numerator": true, "google.generativeai.types.TunedModelState.real": true, "google.generativeai.types.TunedModelState.to_bytes": true, @@ -8356,18 +8735,21 @@ "google.generativeai.GenerativeModel.generate_content": "google.generativeai.generative_models.GenerativeModel.generate_content", "google.generativeai.GenerativeModel.generate_content_async": "google.generativeai.generative_models.GenerativeModel.generate_content_async", "google.generativeai.GenerativeModel.start_chat": "google.generativeai.generative_models.GenerativeModel.start_chat", - "google.generativeai.chat": "google.generativeai.discuss.chat", - "google.generativeai.chat_async": "google.generativeai.discuss.chat_async", + "google.generativeai.caching": "google.generativeai.caching", + "google.generativeai.caching.CachedContent": "google.generativeai.caching.CachedContent", + "google.generativeai.caching.CachedContent.__init__": "google.generativeai.caching.CachedContent.__init__", + "google.generativeai.caching.CachedContent.create": "google.generativeai.caching.CachedContent.create", + "google.generativeai.caching.CachedContent.delete": "google.generativeai.caching.CachedContent.delete", + "google.generativeai.caching.CachedContent.get": "google.generativeai.caching.CachedContent.get", + "google.generativeai.caching.CachedContent.list": "google.generativeai.caching.CachedContent.list", + "google.generativeai.caching.CachedContent.update": "google.generativeai.caching.CachedContent.update", + "google.generativeai.caching.get_default_cache_client": "google.generativeai.client.get_default_cache_client", "google.generativeai.configure": "google.generativeai.client.configure", - "google.generativeai.count_message_tokens": "google.generativeai.discuss.count_message_tokens", - "google.generativeai.count_text_tokens": "google.generativeai.text.count_text_tokens", "google.generativeai.create_tuned_model": 
"google.generativeai.models.create_tuned_model", "google.generativeai.delete_file": "google.generativeai.files.delete_file", "google.generativeai.delete_tuned_model": "google.generativeai.models.delete_tuned_model", "google.generativeai.embed_content": "google.generativeai.embedding.embed_content", "google.generativeai.embed_content_async": "google.generativeai.embedding.embed_content_async", - "google.generativeai.generate_embeddings": "google.generativeai.text.generate_embeddings", - "google.generativeai.generate_text": "google.generativeai.text.generate_text", "google.generativeai.get_base_model": "google.generativeai.models.get_base_model", "google.generativeai.get_file": "google.generativeai.files.get_file", "google.generativeai.get_model": "google.generativeai.models.get_model", @@ -8877,6 +9259,20 @@ "google.generativeai.protos.Document.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.Document.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.Document.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.DynamicRetrievalConfig": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig", + "google.generativeai.protos.DynamicRetrievalConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig.Mode", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": "enum.EnumType.__contains__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": "enum.EnumType.__getitem__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": "enum.EnumType.__iter__", + "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": "enum.EnumType.__len__", + "google.generativeai.protos.DynamicRetrievalConfig.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.DynamicRetrievalConfig.deserialize": "proto.message.MessageMeta.deserialize", + 
"google.generativeai.protos.DynamicRetrievalConfig.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.DynamicRetrievalConfig.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.DynamicRetrievalConfig.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.DynamicRetrievalConfig.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.DynamicRetrievalConfig.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.DynamicRetrievalConfig.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.EmbedContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentRequest", "google.generativeai.protos.EmbedContentRequest.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.EmbedContentRequest.deserialize": "proto.message.MessageMeta.deserialize", @@ -9204,6 +9600,15 @@ "google.generativeai.protos.GetTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.GetTunedModelRequest.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.GetTunedModelRequest.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GoogleSearchRetrieval": "google.ai.generativelanguage_v1beta.types.content.GoogleSearchRetrieval", + "google.generativeai.protos.GoogleSearchRetrieval.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GoogleSearchRetrieval.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GoogleSearchRetrieval.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GoogleSearchRetrieval.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GoogleSearchRetrieval.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GoogleSearchRetrieval.to_dict": "proto.message.MessageMeta.to_dict", + 
"google.generativeai.protos.GoogleSearchRetrieval.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GoogleSearchRetrieval.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.GroundingAttribution": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingAttribution", "google.generativeai.protos.GroundingAttribution.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.GroundingAttribution.deserialize": "proto.message.MessageMeta.deserialize", @@ -9213,6 +9618,33 @@ "google.generativeai.protos.GroundingAttribution.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.GroundingAttribution.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.GroundingAttribution.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingChunk": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk", + "google.generativeai.protos.GroundingChunk.Web": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk.Web", + "google.generativeai.protos.GroundingChunk.Web.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingChunk.Web.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingChunk.Web.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingChunk.Web.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingChunk.Web.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingChunk.Web.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingChunk.Web.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingChunk.Web.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingChunk.copy_from": "proto.message.MessageMeta.copy_from", + 
"google.generativeai.protos.GroundingChunk.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingChunk.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingChunk.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingChunk.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingChunk.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingChunk.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingChunk.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingMetadata", + "google.generativeai.protos.GroundingMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingMetadata.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingMetadata.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.GroundingPassage": "google.ai.generativelanguage_v1beta.types.content.GroundingPassage", "google.generativeai.protos.GroundingPassage.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.GroundingPassage.deserialize": "proto.message.MessageMeta.deserialize", @@ -9231,6 +9663,15 @@ "google.generativeai.protos.GroundingPassages.to_dict": "proto.message.MessageMeta.to_dict", 
"google.generativeai.protos.GroundingPassages.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.GroundingPassages.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.GroundingSupport": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingSupport", + "google.generativeai.protos.GroundingSupport.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.GroundingSupport.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.GroundingSupport.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.GroundingSupport.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.GroundingSupport.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.GroundingSupport.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.GroundingSupport.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.GroundingSupport.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.HarmCategory": "google.ai.generativelanguage_v1beta.types.safety.HarmCategory", "google.generativeai.protos.HarmCategory.__contains__": "enum.EnumType.__contains__", "google.generativeai.protos.HarmCategory.__getitem__": "enum.EnumType.__getitem__", @@ -9389,6 +9830,33 @@ "google.generativeai.protos.ListTunedModelsResponse.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.ListTunedModelsResponse.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.ListTunedModelsResponse.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.LogprobsResult": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult", + "google.generativeai.protos.LogprobsResult.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.Candidate", + "google.generativeai.protos.LogprobsResult.Candidate.copy_from": 
"proto.message.MessageMeta.copy_from", + "google.generativeai.protos.LogprobsResult.Candidate.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.LogprobsResult.Candidate.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.LogprobsResult.Candidate.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.LogprobsResult.Candidate.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.LogprobsResult.Candidate.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.LogprobsResult.Candidate.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.LogprobsResult.Candidate.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.LogprobsResult.TopCandidates": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.TopCandidates", + "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.LogprobsResult.TopCandidates.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.LogprobsResult.TopCandidates.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.LogprobsResult.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.LogprobsResult.deserialize": "proto.message.MessageMeta.deserialize", + 
"google.generativeai.protos.LogprobsResult.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.LogprobsResult.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.LogprobsResult.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.LogprobsResult.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.LogprobsResult.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.LogprobsResult.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.Message": "google.ai.generativelanguage_v1beta.types.discuss_service.Message", "google.generativeai.protos.Message.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.Message.deserialize": "proto.message.MessageMeta.deserialize", @@ -9453,6 +9921,24 @@ "google.generativeai.protos.Permission.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.Permission.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.Permission.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.PredictRequest": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictRequest", + "google.generativeai.protos.PredictRequest.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.PredictRequest.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.PredictRequest.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.PredictRequest.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.PredictRequest.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.PredictRequest.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.PredictRequest.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.PredictRequest.wrap": "proto.message.MessageMeta.wrap", + 
"google.generativeai.protos.PredictResponse": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictResponse", + "google.generativeai.protos.PredictResponse.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.PredictResponse.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.PredictResponse.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.PredictResponse.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.PredictResponse.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.PredictResponse.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.PredictResponse.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.PredictResponse.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.QueryCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusRequest", "google.generativeai.protos.QueryCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.QueryCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize", @@ -9498,6 +9984,15 @@ "google.generativeai.protos.RelevantChunk.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.RelevantChunk.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.RelevantChunk.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.RetrievalMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.RetrievalMetadata", + "google.generativeai.protos.RetrievalMetadata.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.RetrievalMetadata.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.RetrievalMetadata.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.RetrievalMetadata.pb": 
"proto.message.MessageMeta.pb", + "google.generativeai.protos.RetrievalMetadata.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.RetrievalMetadata.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.RetrievalMetadata.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.RetrievalMetadata.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.SafetyFeedback": "google.ai.generativelanguage_v1beta.types.safety.SafetyFeedback", "google.generativeai.protos.SafetyFeedback.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.SafetyFeedback.deserialize": "proto.message.MessageMeta.deserialize", @@ -9543,6 +10038,24 @@ "google.generativeai.protos.Schema.to_dict": "proto.message.MessageMeta.to_dict", "google.generativeai.protos.Schema.to_json": "proto.message.MessageMeta.to_json", "google.generativeai.protos.Schema.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.SearchEntryPoint": "google.ai.generativelanguage_v1beta.types.generative_service.SearchEntryPoint", + "google.generativeai.protos.SearchEntryPoint.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.SearchEntryPoint.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.SearchEntryPoint.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.SearchEntryPoint.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.SearchEntryPoint.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.SearchEntryPoint.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.SearchEntryPoint.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.SearchEntryPoint.wrap": "proto.message.MessageMeta.wrap", + "google.generativeai.protos.Segment": "google.ai.generativelanguage_v1beta.types.generative_service.Segment", + 
"google.generativeai.protos.Segment.copy_from": "proto.message.MessageMeta.copy_from", + "google.generativeai.protos.Segment.deserialize": "proto.message.MessageMeta.deserialize", + "google.generativeai.protos.Segment.from_json": "proto.message.MessageMeta.from_json", + "google.generativeai.protos.Segment.pb": "proto.message.MessageMeta.pb", + "google.generativeai.protos.Segment.serialize": "proto.message.MessageMeta.serialize", + "google.generativeai.protos.Segment.to_dict": "proto.message.MessageMeta.to_dict", + "google.generativeai.protos.Segment.to_json": "proto.message.MessageMeta.to_json", + "google.generativeai.protos.Segment.wrap": "proto.message.MessageMeta.wrap", "google.generativeai.protos.SemanticRetrieverConfig": "google.ai.generativelanguage_v1beta.types.generative_service.SemanticRetrieverConfig", "google.generativeai.protos.SemanticRetrieverConfig.copy_from": "proto.message.MessageMeta.copy_from", "google.generativeai.protos.SemanticRetrieverConfig.deserialize": "proto.message.MessageMeta.deserialize", @@ -9749,7 +10262,6 @@ "google.generativeai.types.AsyncGenerateContentResponse.from_response": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_response", "google.generativeai.types.AsyncGenerateContentResponse.resolve": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.resolve", "google.generativeai.types.AsyncGenerateContentResponse.to_dict": "google.generativeai.types.generation_types.BaseGenerateContentResponse.to_dict", - "google.generativeai.types.AuthorError": "google.generativeai.types.discuss_types.AuthorError", "google.generativeai.types.BlobDict": "google.generativeai.types.content_types.BlobDict", "google.generativeai.types.BlockedPromptException": "google.generativeai.types.generation_types.BlockedPromptException", "google.generativeai.types.BlockedReason": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter.BlockedReason", @@ -9762,18 +10274,10 @@ 
"google.generativeai.types.CallableFunctionDeclaration.__call__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__call__", "google.generativeai.types.CallableFunctionDeclaration.__init__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__init__", "google.generativeai.types.CallableFunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto", - "google.generativeai.types.ChatResponse": "google.generativeai.types.discuss_types.ChatResponse", - "google.generativeai.types.ChatResponse.__eq__": "google.generativeai.types.discuss_types.ChatResponse.__eq__", - "google.generativeai.types.ChatResponse.reply": "google.generativeai.types.discuss_types.ChatResponse.reply", - "google.generativeai.types.ChatResponse.to_dict": "google.generativeai.types.discuss_types.ChatResponse.to_dict", "google.generativeai.types.CitationMetadataDict": "google.generativeai.types.citation_types.CitationMetadataDict", "google.generativeai.types.CitationSourceDict": "google.generativeai.types.citation_types.CitationSourceDict", - "google.generativeai.types.Completion": "google.generativeai.types.text_types.Completion", - "google.generativeai.types.Completion.__eq__": "google.generativeai.types.text_types.Completion.__eq__", - "google.generativeai.types.Completion.to_dict": "google.generativeai.types.text_types.Completion.to_dict", "google.generativeai.types.ContentDict": "google.generativeai.types.content_types.ContentDict", "google.generativeai.types.ContentFilterDict": "google.generativeai.types.safety_types.ContentFilterDict", - "google.generativeai.types.ExampleDict": "google.generativeai.types.discuss_types.ExampleDict", "google.generativeai.types.File": "google.generativeai.types.file_types.File", "google.generativeai.types.File.__init__": "google.generativeai.types.file_types.File.__init__", "google.generativeai.types.File.delete": "google.generativeai.types.file_types.File.delete", @@ -9815,8 
+10319,6 @@ "google.generativeai.types.HarmProbability.__iter__": "enum.EnumType.__iter__", "google.generativeai.types.HarmProbability.__len__": "enum.EnumType.__len__", "google.generativeai.types.IncompleteIterationError": "google.generativeai.types.generation_types.IncompleteIterationError", - "google.generativeai.types.MessageDict": "google.generativeai.types.discuss_types.MessageDict", - "google.generativeai.types.MessagePromptDict": "google.generativeai.types.discuss_types.MessagePromptDict", "google.generativeai.types.Model": "google.generativeai.types.model_types.Model", "google.generativeai.types.Model.__eq__": "google.generativeai.types.model_types.Model.__eq__", "google.generativeai.types.Model.__init__": "google.generativeai.types.model_types.Model.__init__", @@ -9853,7 +10355,6 @@ "google.generativeai.types.RequestOptions.items": "collections.abc.Mapping.items", "google.generativeai.types.RequestOptions.keys": "collections.abc.Mapping.keys", "google.generativeai.types.RequestOptions.values": "collections.abc.Mapping.values", - "google.generativeai.types.ResponseDict": "google.generativeai.types.discuss_types.ResponseDict", "google.generativeai.types.SafetyFeedbackDict": "google.generativeai.types.safety_types.SafetyFeedbackDict", "google.generativeai.types.SafetyRatingDict": "google.generativeai.types.safety_types.SafetyRatingDict", "google.generativeai.types.SafetySettingDict": "google.generativeai.types.safety_types.SafetySettingDict", diff --git a/docs/api/google/generativeai/_toc.yaml b/docs/api/google/generativeai/_toc.yaml index 99797d5d8..7d18dbf66 100644 --- a/docs/api/google/generativeai/_toc.yaml +++ b/docs/api/google/generativeai/_toc.yaml @@ -7,16 +7,8 @@ toc: path: /api/python/google/generativeai/ChatSession - title: GenerativeModel path: /api/python/google/generativeai/GenerativeModel - - title: chat - path: /api/python/google/generativeai/chat - - title: chat_async - path: /api/python/google/generativeai/chat_async - title: configure 
path: /api/python/google/generativeai/configure - - title: count_message_tokens - path: /api/python/google/generativeai/count_message_tokens - - title: count_text_tokens - path: /api/python/google/generativeai/count_text_tokens - title: create_tuned_model path: /api/python/google/generativeai/create_tuned_model - title: delete_file @@ -27,10 +19,6 @@ toc: path: /api/python/google/generativeai/embed_content - title: embed_content_async path: /api/python/google/generativeai/embed_content_async - - title: generate_embeddings - path: /api/python/google/generativeai/generate_embeddings - - title: generate_text - path: /api/python/google/generativeai/generate_text - title: get_base_model path: /api/python/google/generativeai/get_base_model - title: get_file @@ -53,6 +41,14 @@ toc: path: /api/python/google/generativeai/update_tuned_model - title: upload_file path: /api/python/google/generativeai/upload_file + - title: caching + section: + - title: Overview + path: /api/python/google/generativeai/caching + - title: CachedContent + path: /api/python/google/generativeai/caching/CachedContent + - title: get_default_cache_client + path: /api/python/google/generativeai/caching/get_default_cache_client - title: protos section: - title: Overview @@ -169,6 +165,10 @@ toc: path: /api/python/google/generativeai/protos/DeleteTunedModelRequest - title: Document path: /api/python/google/generativeai/protos/Document + - title: DynamicRetrievalConfig + path: /api/python/google/generativeai/protos/DynamicRetrievalConfig + - title: DynamicRetrievalConfig.Mode + path: /api/python/google/generativeai/protos/DynamicRetrievalConfig/Mode - title: EmbedContentRequest path: /api/python/google/generativeai/protos/EmbedContentRequest - title: EmbedContentResponse @@ -247,12 +247,22 @@ toc: path: /api/python/google/generativeai/protos/GetPermissionRequest - title: GetTunedModelRequest path: /api/python/google/generativeai/protos/GetTunedModelRequest + - title: GoogleSearchRetrieval + path: 
/api/python/google/generativeai/protos/GoogleSearchRetrieval - title: GroundingAttribution path: /api/python/google/generativeai/protos/GroundingAttribution + - title: GroundingChunk + path: /api/python/google/generativeai/protos/GroundingChunk + - title: GroundingChunk.Web + path: /api/python/google/generativeai/protos/GroundingChunk/Web + - title: GroundingMetadata + path: /api/python/google/generativeai/protos/GroundingMetadata - title: GroundingPassage path: /api/python/google/generativeai/protos/GroundingPassage - title: GroundingPassages path: /api/python/google/generativeai/protos/GroundingPassages + - title: GroundingSupport + path: /api/python/google/generativeai/protos/GroundingSupport - title: HarmCategory path: /api/python/google/generativeai/protos/HarmCategory - title: Hyperparameters @@ -289,6 +299,12 @@ toc: path: /api/python/google/generativeai/protos/ListTunedModelsRequest - title: ListTunedModelsResponse path: /api/python/google/generativeai/protos/ListTunedModelsResponse + - title: LogprobsResult + path: /api/python/google/generativeai/protos/LogprobsResult + - title: LogprobsResult.Candidate + path: /api/python/google/generativeai/protos/LogprobsResult/Candidate + - title: LogprobsResult.TopCandidates + path: /api/python/google/generativeai/protos/LogprobsResult/TopCandidates - title: Message path: /api/python/google/generativeai/protos/Message - title: MessagePrompt @@ -305,6 +321,10 @@ toc: path: /api/python/google/generativeai/protos/Permission/GranteeType - title: Permission.Role path: /api/python/google/generativeai/protos/Permission/Role + - title: PredictRequest + path: /api/python/google/generativeai/protos/PredictRequest + - title: PredictResponse + path: /api/python/google/generativeai/protos/PredictResponse - title: QueryCorpusRequest path: /api/python/google/generativeai/protos/QueryCorpusRequest - title: QueryCorpusResponse @@ -315,6 +335,8 @@ toc: path: /api/python/google/generativeai/protos/QueryDocumentResponse - title: 
RelevantChunk path: /api/python/google/generativeai/protos/RelevantChunk + - title: RetrievalMetadata + path: /api/python/google/generativeai/protos/RetrievalMetadata - title: SafetyFeedback path: /api/python/google/generativeai/protos/SafetyFeedback - title: SafetyRating @@ -325,6 +347,10 @@ toc: path: /api/python/google/generativeai/protos/Schema - title: Schema.PropertiesEntry path: /api/python/google/generativeai/protos/Schema/PropertiesEntry + - title: SearchEntryPoint + path: /api/python/google/generativeai/protos/SearchEntryPoint + - title: Segment + path: /api/python/google/generativeai/protos/Segment - title: SemanticRetrieverConfig path: /api/python/google/generativeai/protos/SemanticRetrieverConfig - title: StringList @@ -379,8 +405,6 @@ toc: path: /api/python/google/generativeai/types/AnyModelNameOptions - title: AsyncGenerateContentResponse path: /api/python/google/generativeai/types/AsyncGenerateContentResponse - - title: AuthorError - path: /api/python/google/generativeai/types/AuthorError - title: BaseModelNameOptions path: /api/python/google/generativeai/types/BaseModelNameOptions - title: BlobDict @@ -395,14 +419,10 @@ toc: path: /api/python/google/generativeai/types/BrokenResponseError - title: CallableFunctionDeclaration path: /api/python/google/generativeai/types/CallableFunctionDeclaration - - title: ChatResponse - path: /api/python/google/generativeai/types/ChatResponse - title: CitationMetadataDict path: /api/python/google/generativeai/types/CitationMetadataDict - title: CitationSourceDict path: /api/python/google/generativeai/types/CitationSourceDict - - title: Completion - path: /api/python/google/generativeai/types/Completion - title: ContentDict path: /api/python/google/generativeai/types/ContentDict - title: ContentFilterDict @@ -411,12 +431,6 @@ toc: path: /api/python/google/generativeai/types/ContentType - title: ContentsType path: /api/python/google/generativeai/types/ContentsType - - title: ExampleDict - path: 
/api/python/google/generativeai/types/ExampleDict - - title: ExampleOptions - path: /api/python/google/generativeai/types/ExampleOptions - - title: ExamplesOptions - path: /api/python/google/generativeai/types/ExamplesOptions - title: File path: /api/python/google/generativeai/types/File - title: FileDataDict @@ -447,16 +461,6 @@ toc: path: /api/python/google/generativeai/types/HarmProbability - title: IncompleteIterationError path: /api/python/google/generativeai/types/IncompleteIterationError - - title: MessageDict - path: /api/python/google/generativeai/types/MessageDict - - title: MessageOptions - path: /api/python/google/generativeai/types/MessageOptions - - title: MessagePromptDict - path: /api/python/google/generativeai/types/MessagePromptDict - - title: MessagePromptOptions - path: /api/python/google/generativeai/types/MessagePromptOptions - - title: MessagesOptions - path: /api/python/google/generativeai/types/MessagesOptions - title: Model path: /api/python/google/generativeai/types/Model - title: ModelsIterable @@ -473,8 +477,6 @@ toc: path: /api/python/google/generativeai/types/RequestOptions - title: RequestOptionsType path: /api/python/google/generativeai/types/RequestOptionsType - - title: ResponseDict - path: /api/python/google/generativeai/types/ResponseDict - title: SafetyFeedbackDict path: /api/python/google/generativeai/types/SafetyFeedbackDict - title: SafetyRatingDict diff --git a/docs/api/google/generativeai/all_symbols.md b/docs/api/google/generativeai/all_symbols.md index a6fa84caf..bc673a13e 100644 --- a/docs/api/google/generativeai/all_symbols.md +++ b/docs/api/google/generativeai/all_symbols.md @@ -7,18 +7,15 @@ * google.generativeai.ChatSession * google.generativeai.GenerationConfig * google.generativeai.GenerativeModel -* google.generativeai.chat -* google.generativeai.chat_async +* google.generativeai.caching +* google.generativeai.caching.CachedContent +* google.generativeai.caching.get_default_cache_client * 
google.generativeai.configure -* google.generativeai.count_message_tokens -* google.generativeai.count_text_tokens * google.generativeai.create_tuned_model * google.generativeai.delete_file * google.generativeai.delete_tuned_model * google.generativeai.embed_content * google.generativeai.embed_content_async -* google.generativeai.generate_embeddings -* google.generativeai.generate_text * google.generativeai.get_base_model * google.generativeai.get_file * google.generativeai.get_model @@ -86,6 +83,8 @@ * google.generativeai.protos.DeletePermissionRequest * google.generativeai.protos.DeleteTunedModelRequest * google.generativeai.protos.Document +* google.generativeai.protos.DynamicRetrievalConfig +* google.generativeai.protos.DynamicRetrievalConfig.Mode * google.generativeai.protos.EmbedContentRequest * google.generativeai.protos.EmbedContentResponse * google.generativeai.protos.EmbedTextRequest @@ -125,9 +124,14 @@ * google.generativeai.protos.GetModelRequest * google.generativeai.protos.GetPermissionRequest * google.generativeai.protos.GetTunedModelRequest +* google.generativeai.protos.GoogleSearchRetrieval * google.generativeai.protos.GroundingAttribution +* google.generativeai.protos.GroundingChunk +* google.generativeai.protos.GroundingChunk.Web +* google.generativeai.protos.GroundingMetadata * google.generativeai.protos.GroundingPassage * google.generativeai.protos.GroundingPassages +* google.generativeai.protos.GroundingSupport * google.generativeai.protos.HarmCategory * google.generativeai.protos.Hyperparameters * google.generativeai.protos.ListCachedContentsRequest @@ -146,6 +150,9 @@ * google.generativeai.protos.ListPermissionsResponse * google.generativeai.protos.ListTunedModelsRequest * google.generativeai.protos.ListTunedModelsResponse +* google.generativeai.protos.LogprobsResult +* google.generativeai.protos.LogprobsResult.Candidate +* google.generativeai.protos.LogprobsResult.TopCandidates * google.generativeai.protos.Message * 
google.generativeai.protos.MessagePrompt * google.generativeai.protos.MetadataFilter @@ -154,11 +161,14 @@ * google.generativeai.protos.Permission * google.generativeai.protos.Permission.GranteeType * google.generativeai.protos.Permission.Role +* google.generativeai.protos.PredictRequest +* google.generativeai.protos.PredictResponse * google.generativeai.protos.QueryCorpusRequest * google.generativeai.protos.QueryCorpusResponse * google.generativeai.protos.QueryDocumentRequest * google.generativeai.protos.QueryDocumentResponse * google.generativeai.protos.RelevantChunk +* google.generativeai.protos.RetrievalMetadata * google.generativeai.protos.SafetyFeedback * google.generativeai.protos.SafetyRating * google.generativeai.protos.SafetyRating.HarmProbability @@ -166,6 +176,8 @@ * google.generativeai.protos.SafetySetting.HarmBlockThreshold * google.generativeai.protos.Schema * google.generativeai.protos.Schema.PropertiesEntry +* google.generativeai.protos.SearchEntryPoint +* google.generativeai.protos.Segment * google.generativeai.protos.SemanticRetrieverConfig * google.generativeai.protos.StringList * google.generativeai.protos.TaskType @@ -193,7 +205,6 @@ * google.generativeai.types * google.generativeai.types.AnyModelNameOptions * google.generativeai.types.AsyncGenerateContentResponse -* google.generativeai.types.AuthorError * google.generativeai.types.BaseModelNameOptions * google.generativeai.types.BlobDict * google.generativeai.types.BlobType @@ -201,17 +212,12 @@ * google.generativeai.types.BlockedReason * google.generativeai.types.BrokenResponseError * google.generativeai.types.CallableFunctionDeclaration -* google.generativeai.types.ChatResponse * google.generativeai.types.CitationMetadataDict * google.generativeai.types.CitationSourceDict -* google.generativeai.types.Completion * google.generativeai.types.ContentDict * google.generativeai.types.ContentFilterDict * google.generativeai.types.ContentType * google.generativeai.types.ContentsType -* 
google.generativeai.types.ExampleDict -* google.generativeai.types.ExampleOptions -* google.generativeai.types.ExamplesOptions * google.generativeai.types.File * google.generativeai.types.FileDataDict * google.generativeai.types.FileDataType @@ -227,11 +233,6 @@ * google.generativeai.types.HarmCategory * google.generativeai.types.HarmProbability * google.generativeai.types.IncompleteIterationError -* google.generativeai.types.MessageDict -* google.generativeai.types.MessageOptions -* google.generativeai.types.MessagePromptDict -* google.generativeai.types.MessagePromptOptions -* google.generativeai.types.MessagesOptions * google.generativeai.types.Model * google.generativeai.types.ModelNameOptions * google.generativeai.types.ModelsIterable @@ -241,7 +242,6 @@ * google.generativeai.types.Permissions * google.generativeai.types.RequestOptions * google.generativeai.types.RequestOptionsType -* google.generativeai.types.ResponseDict * google.generativeai.types.SafetyFeedbackDict * google.generativeai.types.SafetyRatingDict * google.generativeai.types.SafetySettingDict diff --git a/docs/api/google/generativeai/api_report.pb b/docs/api/google/generativeai/api_report.pb index 96c5f00568db0f4db860421c667f7d315faa1484..bd0f3970f40d4a0bc343cb3857d7592d9ef6848a 100644 GIT binary patch delta 2620 zcmZ`*ZA?>V6z*G~Ed^;O_Jhi8K@pv%r6^SpCq-l$5vgEwI%oO1Vkh>pQgnVW>*uy) zit=RBm}QhX-SFGJ)1WY^*%IfBikX=UQ-go@nJ}EuY}vByy+xqTYk&0S-gBPkoadbL zo_jmCMRgpGvZ~`%A6yBwE3|uldT@q}96i}SIm?=9w`NXVG}%5qD{FdYmTgLQW>)s( zWu|o~QR(+~#GNi)5+mP2khxuNtE#A48B;#4)fjPIJprymI-MjIz58{T z6L65txRIt$lJv3F@cw3MM1wVvk)R1_up}Cu5i4MC0x-`V8Mzp$#_s$gc7p_zon_@wlq^jKwM4FL?AtyD$_3# zl%!`$(e$+%(488K{xmK9WBlF(gw~j~v)6ko)_AH)dB4xYudS#>>rx}SwFD&caq;3X#3V)}n{HnJ-{HMU}*8m+k{(%7Ea zVeHdMl@~4*Nzip*9p^(|ilLKBKs5qU5lr?*U@N9U^}WYAxVeR6b_xTIjq zU9S>_ET63wKkn%=;MTAiElY>9^ep1N4ooVmi^#jXY=a!$MWqrpmkVi;<8Io?C_g)a|EHLmM`Ngd{wK4=k~tYhzVOAxV_whKVK=wOp7^o zj!$UtR^1*;mot8Gj8O5CX8@UYPibM+HIdlK$GACF&) 
z>_|9gK;4So$5*YQtf8(>@L^$M5|M?OUGC7Z(C`8{RjGpDj-x{PX9S()lO#lo6X12n z)8fibm6)|^zED%=6DsTBN-^Wha}Mlz&SD&VSjyM41;#c!(^buuGcLAs;yI1pLDbWb zTAiHq82#9Bkx)123*t86QK^H*u6`$C^{^f1p#ORw@3(TV)P4hJGSmLL2@w+kP20d| zme*_PMqejE@4=x55;F1jus?WzktkK^^6}N4DnAz5j0n7_M6t)w91OE4228L|OGW=g z1vXlX={5f%x!4}nVApZoZenenA#zdYy3K^7AoTjBaD&*XNf?-sLa*Pq-X(Z&QJ08d zjuLL(f!NjN=rX=$4MMt6tieiX<~qV-c6j~X2$i&U<23^D6sBxe;h&8vDP?!h44K0_ zTFmC_#wa36qBq~TM~I7_4HQPyoZ7cvAZXb>R)UW9?PyhCK?euJE5M-Kse7)@BW)4Yz*zp35Pblp9p~oI&olUOq@$R?J^nrkZ>U{xM7H;FL-761kk?L z-`RUG-GE@58k!F^66w+2gE?HMuT%VR zayrMT+1JopxNlF!#CRj_od0ML1j}&_p-wgKbSUG)B?{y*NO?Z?Qe*SF9HeyVu(DL{4zn)g=eHEGHP+lS%kkvusQ2eqNPhig7jq9BZ9_Q{eL8~`A`4= delta 1697 zcmah}ZERCj818A;uI;|=KD%|hZ8ux;HU7pt77uQp@N~x&RW}-)xo>SxUadLf9f{cCAWipGsK>W$mvVsHc-=(8#aEhW) zYY@SC4D56@`Q01;mx6rFRz{m4(y~3<+&*W6(A4Cr7MwoM7Qx-b1eB~@1VyP?9h!X7 z0J1nEYhx7Y6PkUG!UE$kdO%YoSNOHlVUN!+xdpGwCpd+M4Z@~PweD(B^r4Y}tpEwZ z9E2_USW7p=}zEsRsd4pQ*AYvd+oP7vsoxF6$41a1ELxTE_X_f$#}gWStzaAgK0 zW$P%Pb&H@gF!m>y?I#k9ty_oDoKza%Lj=R+22>g2v3Y3)%`p`#B)BD2MsRtlSTHq# z4o>-*ps;eHprM=d_enLwXLRVAtwneyM_14IieTvUWZ~MZwnS=JFgr#fKB_muv?zyO zSa^$2uIz+VY`Ayu(R?M@i%WVlo_^^c%*vdL`gM(y;;3@2z@1f<^s|?TNynP$ zh^$Sb>s?PNuybv!Dq`bhG8^o9W>hpA=^vXeE6`VM#890YuG-aTGa2Bh)8P7+SiHF9 zDh}3%CDIn7frc8w1mXlt?lk0Is6ZqnKC9`@#^q{Uc-ug??r=+|AWpPDprcD)XF+ua z7E4^g(WgVGMMsys(MParw;xWgfok@wl*4jeM@QexGO`oWFgBb0GG+kyiTQuyHX_0VKK4;cqomgAEJcXx|0<+f0mC~ z#4a#ed|3(OH#{t69mI#H%xt54nz+0|XlnGhn}kBI&{*&12=s||)P{KlTf{)85lN@! 
z!LUGw$dOohjt-;m8n0U9X=tn$(6THY_v&JC^P-ZBO>iX4YsVISwH`OxLm5#%vxS;q z)})8UQMuc}b)w_6vSyd4_gw8pA18~xCuA~&4whouH$1I6cAHER&1@c`Qx*O`kxE-X zeMcEDvoljN*);aGBRN>gj`$ejL5M?ZPl}X$<0y~yXHxK>=VQs_hECrn^qI5sq=DMH zMPv+?oztMdp>jN6)kKX}d|pcEzvmy}?Aje|+|ipxKlom%{MoKK&a-s76gDV=|HgFH?q z&Rk{_;{g|(f+xPt#E#>9$FFCP%4`0t)?f?J) diff --git a/docs/api/google/generativeai/caching.md b/docs/api/google/generativeai/caching.md new file mode 100644 index 000000000..784ab1b80 --- /dev/null +++ b/docs/api/google/generativeai/caching.md @@ -0,0 +1,49 @@ + +# Module: google.generativeai.caching + + + + + + + + + + + + + +## Classes + +[`class CachedContent`](../../google/generativeai/caching/CachedContent.md): Cached content resource. + +## Functions + +[`get_default_cache_client(...)`](../../google/generativeai/caching/get_default_cache_client.md) + + + + + + + + + + + + +
+ +annotations + + + +Instance of `__future__._Feature` + +
+ diff --git a/docs/api/google/generativeai/caching/CachedContent.md b/docs/api/google/generativeai/caching/CachedContent.md new file mode 100644 index 000000000..a0efa473d --- /dev/null +++ b/docs/api/google/generativeai/caching/CachedContent.md @@ -0,0 +1,448 @@ + +# google.generativeai.caching.CachedContent + + + + + + + + + +Cached content resource. + + + + + + + + + + + + + + + + + +
+ +`name` + + + +The resource name referring to the cached content. + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +`create_time` + + + + + +
+ +`display_name` + + + + + +
+ +`expire_time` + + + + + +
+ +`model` + + + + + +
+ +`name` + + + + + +
+ +`update_time` + + + + + +
+ +`usage_metadata` + + + + + +
+ + + +## Methods + +

create

+ +View source + + + +Creates `CachedContent` resource. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Args
+ +`model` + + + +The name of the `model` to use for cached content creation. +Any `CachedContent` resource can be only used with the +`model` it was created for. + +
+ +`display_name` + + + +The user-generated meaningful display name +of the cached content. `display_name` must be no +more than 128 unicode characters. + +
+ +`system_instruction` + + + +Developer set system instruction. + +
+ +`contents` + + + +Contents to cache. + +
+ +`tools` + + + +A list of `Tools` the model may use to generate response. + +
+ +`tool_config` + + + +Config to apply to all tools. + +
+ +`ttl` + + + +TTL for cached resource (in seconds). Defaults to 1 hour. +`ttl` and `expire_time` are exclusive arguments. + +
+ +`expire_time` + + + +Expiration time for cached resource. +`ttl` and `expire_time` are exclusive arguments. + +
+ + + + + + + + + + + +
Returns
+ +`CachedContent` resource with specified name. + +
+ + + +

delete

+ +View source + + + +Deletes `CachedContent` resource. + + +

get

+ +View source + + + +Fetches required `CachedContent` resource. + + + + + + + + + + + +
Args
+ +`name` + + + +The resource name referring to the cached content. + +
+ + + + + + + + + + + +
Returns
+ +`CachedContent` resource with specified `name`. + +
+ + + +

list

+ +View source + + + +Lists `CachedContent` objects associated with the project. + + + + + + + + + + + +
Args
+ +`page_size` + + + +The maximum number of permissions to return (per page). +The service may return fewer `CachedContent` objects. + +
+ + + + + + + + + + + +
Returns
+ +A paginated list of `CachedContent` objects. + +
+ + + +

update

+ +View source + + + +Updates requested `CachedContent` resource. + + + + + + + + + + + + + + +
Args
+ +`ttl` + + + +TTL for cached resource (in seconds). Defaults to 1 hour. +`ttl` and `expire_time` are exclusive arguments. + +
+ +`expire_time` + + + +Expiration time for cached resource. +`ttl` and `expire_time` are exclusive arguments. + +
+ + + + + diff --git a/docs/api/google/generativeai/caching/get_default_cache_client.md b/docs/api/google/generativeai/caching/get_default_cache_client.md new file mode 100644 index 000000000..8457f5c14 --- /dev/null +++ b/docs/api/google/generativeai/caching/get_default_cache_client.md @@ -0,0 +1,26 @@ + +# google.generativeai.caching.get_default_cache_client + + + + + + + + + + + + + + + + + diff --git a/docs/api/google/generativeai/chat.md b/docs/api/google/generativeai/chat.md deleted file mode 100644 index 1dc51c8f6..000000000 --- a/docs/api/google/generativeai/chat.md +++ /dev/null @@ -1,198 +0,0 @@ -description: Calls the API to initiate a chat with a model using provided parameters - -
- - -
- -# google.generativeai.chat - - - - - - - - - -Calls the API to initiate a chat with a model using provided parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-`model` - -Which model to call, as a string or a types.Model. -
-`context` - -Text that should be provided to the model first, to ground the response. - -If not empty, this `context` will be given to the model first before the -`examples` and `messages`. - -This field can be a description of your prompt to the model to help provide -context and guide the responses. - -Examples: - -* "Translate the phrase from English to French." -* "Given a statement, classify the sentiment as happy, sad or neutral." - -Anything included in this field will take precedence over history in `messages` -if the total input size exceeds the model's Model.input_token_limit. -
-`examples` - -Examples of what the model should generate. - -This includes both the user input and the response that the model should -emulate. - -These `examples` are treated identically to conversation messages except -that they take precedence over the history in `messages`: -If the total input size exceeds the model's `input_token_limit` the input -will be truncated. Items will be dropped from `messages` before `examples` -
-`messages` - -A snapshot of the conversation history sorted chronologically. - -Turns alternate between two authors. - -If the total input size exceeds the model's `input_token_limit` the input -will be truncated: The oldest items will be dropped from `messages`. -
-`temperature` - -Controls the randomness of the output. Must be positive. - -Typical values are in the range: `[0.0,1.0]`. Higher values produce a -more random and varied response. A temperature of zero will be deterministic. -
-`candidate_count` - -The **maximum** number of generated response messages to return. - -This value must be between `[1, 8]`, inclusive. If unset, this -will default to `1`. - -Note: Only unique candidates are returned. Higher temperatures are more -likely to produce unique candidates. Setting `temperature=0.0` will always -return 1 candidate regardless of the `candidate_count`. -
-`top_k` - -The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and -top-k sampling. - -`top_k` sets the maximum number of tokens to sample from on each step. -
-`top_p` - -The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and -top-k sampling. - -`top_p` configures the nucleus sampling. It sets the maximum cumulative - probability of tokens to sample from. - - For example, if the sorted probabilities are - `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample - as `[0.625, 0.25, 0.125, 0, 0, 0]`. - - Typical values are in the `[0.9, 1.0]` range. -
-`prompt` - -You may pass a types.MessagePromptOptions **instead** of a -setting `context`/`examples`/`messages`, but not both. -
-`client` - -If you're not relying on the default client, you pass a -`glm.DiscussServiceClient` instead. -
-`request_options` - -Options for the request. -
- - - - - - - - - - - -
-A types.ChatResponse containing the model's reply. -
- diff --git a/docs/api/google/generativeai/chat_async.md b/docs/api/google/generativeai/chat_async.md deleted file mode 100644 index 614456c1f..000000000 --- a/docs/api/google/generativeai/chat_async.md +++ /dev/null @@ -1,198 +0,0 @@ -description: Calls the API to initiate a chat with a model using provided parameters - -
- - -
- -# google.generativeai.chat_async - - - - - - - - - -Calls the API to initiate a chat with a model using provided parameters - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-`model` - -Which model to call, as a string or a types.Model. -
-`context` - -Text that should be provided to the model first, to ground the response. - -If not empty, this `context` will be given to the model first before the -`examples` and `messages`. - -This field can be a description of your prompt to the model to help provide -context and guide the responses. - -Examples: - -* "Translate the phrase from English to French." -* "Given a statement, classify the sentiment as happy, sad or neutral." - -Anything included in this field will take precedence over history in `messages` -if the total input size exceeds the model's Model.input_token_limit. -
-`examples` - -Examples of what the model should generate. - -This includes both the user input and the response that the model should -emulate. - -These `examples` are treated identically to conversation messages except -that they take precedence over the history in `messages`: -If the total input size exceeds the model's `input_token_limit` the input -will be truncated. Items will be dropped from `messages` before `examples` -
-`messages` - -A snapshot of the conversation history sorted chronologically. - -Turns alternate between two authors. - -If the total input size exceeds the model's `input_token_limit` the input -will be truncated: The oldest items will be dropped from `messages`. -
-`temperature` - -Controls the randomness of the output. Must be positive. - -Typical values are in the range: `[0.0,1.0]`. Higher values produce a -more random and varied response. A temperature of zero will be deterministic. -
-`candidate_count` - -The **maximum** number of generated response messages to return. - -This value must be between `[1, 8]`, inclusive. If unset, this -will default to `1`. - -Note: Only unique candidates are returned. Higher temperatures are more -likely to produce unique candidates. Setting `temperature=0.0` will always -return 1 candidate regardless of the `candidate_count`. -
-`top_k` - -The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and -top-k sampling. - -`top_k` sets the maximum number of tokens to sample from on each step. -
-`top_p` - -The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and -top-k sampling. - -`top_p` configures the nucleus sampling. It sets the maximum cumulative - probability of tokens to sample from. - - For example, if the sorted probabilities are - `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample - as `[0.625, 0.25, 0.125, 0, 0, 0]`. - - Typical values are in the `[0.9, 1.0]` range. -
-`prompt` - -You may pass a types.MessagePromptOptions **instead** of a -setting `context`/`examples`/`messages`, but not both. -
-`client` - -If you're not relying on the default client, you pass a -`glm.DiscussServiceClient` instead. -
-`request_options` - -Options for the request. -
- - - - - - - - - - - -
-A types.ChatResponse containing the model's reply. -
- diff --git a/docs/api/google/generativeai/configure.md b/docs/api/google/generativeai/configure.md index f0b5f4006..81c9e19be 100644 --- a/docs/api/google/generativeai/configure.md +++ b/docs/api/google/generativeai/configure.md @@ -1,17 +1,11 @@ -description: Captures default client configuration. - -
- - -
# google.generativeai.configure - + diff --git a/docs/api/google/generativeai/count_message_tokens.md b/docs/api/google/generativeai/count_message_tokens.md deleted file mode 100644 index 7ec05db9b..000000000 --- a/docs/api/google/generativeai/count_message_tokens.md +++ /dev/null @@ -1,41 +0,0 @@ -description: Calls the API to calculate the number of tokens used in the prompt. - -
- - -
- -# google.generativeai.count_message_tokens - - - - - - - - - -Calls the API to calculate the number of tokens used in the prompt. - - - - - - - diff --git a/docs/api/google/generativeai/count_text_tokens.md b/docs/api/google/generativeai/count_text_tokens.md deleted file mode 100644 index a15f0f2aa..000000000 --- a/docs/api/google/generativeai/count_text_tokens.md +++ /dev/null @@ -1,37 +0,0 @@ -description: Calls the API to count the number of tokens in the text prompt. - -
- - -
- -# google.generativeai.count_text_tokens - - - - - - - - - -Calls the API to count the number of tokens in the text prompt. - - - - - - - diff --git a/docs/api/google/generativeai/create_tuned_model.md b/docs/api/google/generativeai/create_tuned_model.md index c12179164..04ce93d75 100644 --- a/docs/api/google/generativeai/create_tuned_model.md +++ b/docs/api/google/generativeai/create_tuned_model.md @@ -1,15 +1,9 @@ -description: Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress. - -
- - -
# google.generativeai.create_tuned_model - + @@ -190,7 +236,9 @@ Options for the request. + A [`google.api_core.operation.Operation`](https://googleapis.dev/python/google-api-core/latest/operation.html) + diff --git a/docs/api/google/generativeai/delete_file.md b/docs/api/google/generativeai/delete_file.md index 1098c3afb..5a3bfc295 100644 --- a/docs/api/google/generativeai/delete_file.md +++ b/docs/api/google/generativeai/delete_file.md @@ -1,17 +1,11 @@ -description: Calls the API to permanently delete a specified file using a supported file service. - -
- - -
# google.generativeai.delete_file - + + + + + + + + + + + + + + + + diff --git a/docs/api/google/generativeai/protos/Chunk.md b/docs/api/google/generativeai/protos/Chunk.md index d807ab292..b1fbdb9e6 100644 --- a/docs/api/google/generativeai/protos/Chunk.md +++ b/docs/api/google/generativeai/protos/Chunk.md @@ -1,16 +1,9 @@ -description: A Chunk is a subpart of a Document that is treated as an independent unit for the purposes of vector representation and storage. - -
- - - -
# google.generativeai.protos.Chunk - + diff --git a/docs/api/google/generativeai/protos/Chunk/State.md b/docs/api/google/generativeai/protos/Chunk/State.md index 137cb9efe..c825186bf 100644 --- a/docs/api/google/generativeai/protos/Chunk/State.md +++ b/docs/api/google/generativeai/protos/Chunk/State.md @@ -1,62 +1,9 @@ -description: States for the lifecycle of a Chunk. - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# google.generativeai.protos.Chunk.State - + @@ -136,31 +99,47 @@ state is omitted. + `denominator` + + the denominator of a rational number in lowest terms + + `imag` + + the imaginary part of a complex number + + `numerator` + + the numerator of a rational number in lowest terms + + `real` + + the real part of a complex number + @@ -175,10 +154,9 @@ the real part of a complex number as_integer_ratio() -Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -254,6 +232,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -257,6 +235,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -305,6 +298,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -235,6 +207,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -256,6 +234,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -262,6 +240,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -256,6 +234,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -245,6 +220,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -245,6 +247,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -331,6 +342,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -256,6 +234,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -255,6 +233,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -301,6 +291,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes

-Return integer ratio. +Return a pair of integers, whose ratio is equal to the original int. -Return a pair of integers, whose ratio is exactly equal to the original int -and with a positive denominator. +The ratio is in lowest terms and has a positive denominator. ``` >>> (10).as_integer_ratio() @@ -283,6 +270,15 @@ byteorder signed Indicates whether two's complement is used to represent the integer. +

is_integer

+ + + +Returns True. Exists for duck type compatibility with float.is_integer. + +

to_bytes