Skip to content

Commit 1485c42

Browse files
fix(generative_ai): fix typos in function calling sample (GoogleCloudPlatform#12679)
* Fix typos in the function declaration 'description'; wrap long line * Code formatting in generative_ai/function_calling/parallel_function_calling_example.py Co-authored-by: Sampath Kumar <sam1990kumar@gmail.com>
1 parent 6123233 commit 1485c42

File tree

9 files changed

+11
-12
lines changed

9 files changed

+11
-12
lines changed

generative_ai/context_caching/list_context_caches.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,6 @@ def list_content_caches() -> list[str]:
3838
# Cached content 'example-cache' for model '.../gemini-1.5-pro-001'
3939
# Last updated at: 2024-09-16T12:41:09.998635Z
4040
# Expires at: 2024-09-16T13:41:09.989729Z
41-
4241
# [END generativeaionvertexai_context_caching_list]
4342
return [cached_content.name for cached_content in cache_list]
4443

generative_ai/embeddings/batch_example.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@ def embed_text_batch() -> BatchPredictionJob:
2626
"""
2727
# [START generativeaionvertexai_embedding_batch]
2828
import vertexai
29+
2930
from vertexai.preview import language_models
3031

3132
# TODO(developer): Update & uncomment line below
@@ -52,9 +53,7 @@ def embed_text_batch() -> BatchPredictionJob:
5253
# BatchPredictionJob 2024-09-10 15:47:51.336391
5354
# projects/1234567890/locations/us-central1/batchPredictionJobs/123456789012345
5455
# JobState.JOB_STATE_SUCCEEDED
55-
5656
# [END generativeaionvertexai_embedding_batch]
57-
5857
return batch_prediction_job
5958

6059

generative_ai/embeddings/generate_embeddings_with_lower_dimension.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,7 @@ def generate_embeddings_with_lower_dimension() -> MultiModalEmbeddingResponse:
5555
# Example response:
5656
# Image Embedding: [0.0622573346, -0.0406507477, 0.0260440577, ...]
5757
# Text Embedding: [0.27469793, -0.146258667, 0.0222803634, ...]
58-
5958
# [END generativeaionvertexai_embeddings_specify_lower_dimension]
60-
6159
return embeddings
6260

6361

generative_ai/evaluation/get_rouge_score.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,6 @@ def get_rouge_score() -> EvalResult:
9595
# 0 Summarize the following text:\n\n\n ... 0.659794 0.484211 ...
9696
# 1 Summarize the following text:\n\n\n ... 0.704762 0.524272 ...
9797
# ...
98-
9998
# [END generativeaionvertexai_evaluation_get_rouge_score]
10099
return result
101100

generative_ai/evaluation/pairwise_summarization_quality.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,6 @@ def evaluate_output() -> EvalResult:
9898
#
9999
# Winner: CANDIDATE
100100
# Explanation: Both responses adhere to the prompt's constraints, are grounded in the provided text, and ... However, Response B ...
101-
102101
# [END generativeaionvertexai_evaluation_pairwise_summarization_quality]
103102
return result
104103

generative_ai/function_calling/advanced_example.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
def generate_function_call_advanced() -> GenerationResponse:
2323
# [START generativeaionvertexai_gemini_function_calling_advanced]
2424
import vertexai
25+
2526
from vertexai.preview.generative_models import (
2627
FunctionDeclaration,
2728
GenerativeModel,

generative_ai/function_calling/basic_example.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
def generate_function_call() -> GenerationResponse:
2323
# [START generativeaionvertexai_gemini_function_calling]
2424
import vertexai
25+
2526
from vertexai.generative_models import (
2627
Content,
2728
FunctionDeclaration,

generative_ai/function_calling/chat_example.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
def generate_function_call_chat() -> ChatSession:
2222
# [START generativeaionvertexai_gemini_function_calling_chat]
2323
import vertexai
24+
2425
from vertexai.generative_models import (
2526
FunctionDeclaration,
2627
GenerationConfig,

generative_ai/function_calling/parallel_function_calling_example.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
def parallel_function_calling_example() -> ChatSession:
2222
# [START generativeaionvertexai_function_calling_generate_parallel_calls]
2323
import vertexai
24+
2425
from vertexai.generative_models import (
2526
FunctionDeclaration,
2627
GenerativeModel,
@@ -44,7 +45,9 @@ def parallel_function_calling_example() -> ChatSession:
4445
"properties": {
4546
"location": {
4647
"type": "string",
47-
"description": "The location to whih to get the weather. Can be a city name, a city name and state, or a zip code. Examples: 'San Francisco', 'San Francisco, CA', '95616', etc.",
48+
"description": "The location for which to get the weather. \
49+
It can be a city name, a city name and state, or a zip code. \
50+
Examples: 'San Francisco', 'San Francisco, CA', '95616', etc.",
4851
},
4952
},
5053
},
@@ -67,8 +70,8 @@ def mock_weather_api_service(location: str) -> str:
6770
)
6871

6972
# Start a chat session
70-
chat = model.start_chat()
71-
response = chat.send_message("Get weather details in New Delhi and San Francisco?")
73+
chat_session = model.start_chat()
74+
response = chat_session.send_message("Get weather details in New Delhi and San Francisco?")
7275

7376
function_calls = response.candidates[0].function_calls
7477
print("Suggested finction calls:\n", function_calls)
@@ -86,7 +89,7 @@ def mock_weather_api_service(location: str) -> str:
8689
)
8790

8891
# Return the API response to Gemini
89-
response = chat.send_message(
92+
response = chat_session.send_message(
9093
[
9194
Part.from_function_response(
9295
name="get_current_weather",
@@ -102,7 +105,6 @@ def mock_weather_api_service(location: str) -> str:
102105
print(response.text)
103106
# Example response:
104107
# The current weather in New Delhi is 35°C. The current weather in San Francisco is 25°C.
105-
106108
# [END generativeaionvertexai_function_calling_generate_parallel_calls]
107109
return response
108110

0 commit comments

Comments (0)