diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py
index 74f03029c..82d36f7c5 100644
--- a/google/generativeai/types/content_types.py
+++ b/google/generativeai/types/content_types.py
@@ -369,7 +369,9 @@ def _schema_for_function(
             )
         )
     ]
-    schema = dict(name=f.__name__, description=f.__doc__, parameters=parameters)
+    schema = dict(name=f.__name__, description=f.__doc__)
+    if parameters["properties"]:
+        schema["parameters"] = parameters
 
     return schema
 
diff --git a/samples/json_mode.py b/samples/controlled_generation.py
similarity index 100%
rename from samples/json_mode.py
rename to samples/controlled_generation.py
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index 81bedeb4b..42c40d8e1 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -20,9 +20,6 @@
 media = pathlib.Path(__file__).parents[1] / "third_party"
 
 
-
-
-
 class UnitTests(absltest.TestCase):
     def test_tokens_text_only(self):
         # [START tokens_text_only]
@@ -84,8 +81,10 @@ def test_tokens_cached_content(self):
 
     def test_tokens_system_instruction(self):
         # [START tokens_system_instruction]
         document = genai.upload_file(path=media / "a11.txt")
-        model = genai.GenerativeModel("models/gemini-1.5-flash-001",
-                                      system_instruction="You are an expert analyzing transcripts. Give a summary of this document.")
+        model = genai.GenerativeModel(
+            "models/gemini-1.5-flash-001",
+            system_instruction="You are an expert analyzing transcripts. Give a summary of this document.",
+        )
         print(model.count_tokens(document))
         # [END tokens_system_instruction]
@@ -95,25 +94,27 @@
         def add(a: float, b: float):
             """returns a + b."""
             return a + b
 
-
         def subtract(a: float, b: float):
             """returns a - b."""
             return a - b
 
-
         def multiply(a: float, b: float):
             """returns a * b."""
             return a * b
 
-
         def divide(a: float, b: float):
             """returns a / b."""
             return a / b
 
-
-        model = genai.GenerativeModel("models/gemini-1.5-flash-001",
-                                      tools=[add, subtract, multiply, divide])
-
-        print(model.count_tokens("I have 57 cats, each owns 44 mittens, how many mittens is that in total?"))
+
+        model = genai.GenerativeModel(
+            "models/gemini-1.5-flash-001", tools=[add, subtract, multiply, divide]
+        )
+
+        print(
+            model.count_tokens(
+                "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+            )
+        )
         # [END tokens_tools]
diff --git a/samples/function_calling.py b/samples/function_calling.py
new file mode 100644
index 000000000..8832408cf
--- /dev/null
+++ b/samples/function_calling.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from absl.testing import absltest
+
+import google.generativeai as genai
+
+
+class UnitTests(absltest.TestCase):
+    def test_function_calling(self):
+        # [START function_calling]
+        def add(a: float, b: float):
+            """returns a + b."""
+            return a + b
+
+        def subtract(a: float, b: float):
+            """returns a - b."""
+            return a - b
+
+        def multiply(a: float, b: float):
+            """returns a * b."""
+            return a * b
+
+        def divide(a: float, b: float):
+            """returns a / b."""
+            return a / b
+
+        model = genai.GenerativeModel(
+            model_name="gemini-1.5-flash", tools=[add, subtract, multiply, divide]
+        )
+        chat = model.start_chat(enable_automatic_function_calling=True)
+        response = chat.send_message(
+            "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+        )
+        print(response.text)
+        # [END function_calling]
+
+
+if __name__ == "__main__":
+    absltest.main()
diff --git a/samples/text_generation.py b/samples/text_generation.py
index 015a00e1f..6ba793dfa 100644
--- a/samples/text_generation.py
+++ b/samples/text_generation.py
@@ -41,6 +41,7 @@ def test_text_gen_text_only_prompt_streaming(self):
     def test_text_gen_multimodal_one_image_prompt(self):
         # [START text_gen_multimodal_one_image_prompt]
         import PIL
+
         model = genai.GenerativeModel("gemini-1.5-flash")
         organ = PIL.Image.open(media / "organ.jpg")
         response = model.generate_content(["Tell me about this instrument", organ])
@@ -50,6 +51,7 @@ def test_text_gen_multimodal_one_image_prompt(self):
     def test_text_gen_multimodal_one_image_prompt_streaming(self):
         # [START text_gen_multimodal_one_image_prompt_streaming]
         import PIL
+
         model = genai.GenerativeModel("gemini-1.5-flash")
         organ = PIL.Image.open(media / "organ.jpg")
         response = model.generate_content(["Tell me about this instrument", organ], stream=True)
@@ -61,6 +63,7 @@
     def test_text_gen_multimodal_multi_image_prompt(self):
         # [START text_gen_multimodal_multi_image_prompt]
         import PIL
+
         model = genai.GenerativeModel("gemini-1.5-flash")
         organ = PIL.Image.open(media / "organ.jpg")
         cajun_instrument = PIL.Image.open(media / "Cajun_instruments.jpg")
@@ -73,6 +76,7 @@ def test_text_gen_multimodal_multi_image_prompt(self):
     def test_text_gen_multimodal_multi_image_prompt_streaming(self):
         # [START text_gen_multimodal_multi_image_prompt_streaming]
         import PIL
+
         model = genai.GenerativeModel("gemini-1.5-flash")
         organ = PIL.Image.open(media / "organ.jpg")
         cajun_instrument = PIL.Image.open(media / "Cajun_instruments.jpg")
diff --git a/tests/test_content.py b/tests/test_content.py
index 5b7aa9781..b52858bb8 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -378,6 +378,15 @@ def test_to_tools(self, tools):
 
         self.assertEqual(tools, expected)
 
+    def test_empty_function(self):
+        def no_args():
+            print("hello")
+
+        fd = content_types.to_function_library(no_args).to_proto()[0]  # type: ignore
+        fd = type(fd).to_dict(fd, including_default_value_fields=False)
+        # parameters are not set.
+        self.assertEqual({"function_declarations": [{"name": "no_args"}]}, fd)
+
     @parameterized.named_parameters(
         ["string", "code_execution"],
         ["proto_object", protos.CodeExecution()],
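
Reviewer note: a minimal sketch (not part of the patch) of the behavior the first hunk introduces, assuming this branch of google-generativeai is installed locally. A zero-argument function now produces a FunctionDeclaration with no `parameters` field at all, which is exactly what the new test_empty_function asserts:

from google.generativeai.types import content_types

def no_args():
    print("hello")

# Build a function library from the bare function and inspect its proto form.
tool = content_types.to_function_library(no_args).to_proto()[0]  # type: ignore
tool_dict = type(tool).to_dict(tool, including_default_value_fields=False)

# Expected output: {'function_declarations': [{'name': 'no_args'}]}
print(tool_dict)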