diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml
index 6dcd9ab15..19574ff0a 100644
--- a/.github/workflows/labeler.yml
+++ b/.github/workflows/labeler.yml
@@ -11,5 +11,5 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- - uses: actions/labeler@v4
+ - uses: actions/labeler@ac9175f8a1f3625fd0d4fb234536d26811351594 # v4
if: ${{ github.event.pull_request.draft == false }}
diff --git a/.github/workflows/remove-issue-labels.yml b/.github/workflows/remove-issue-labels.yml
index 43f43dd13..79783430d 100644
--- a/.github/workflows/remove-issue-labels.yml
+++ b/.github/workflows/remove-issue-labels.yml
@@ -11,7 +11,7 @@ jobs:
issues: write
runs-on: ubuntu-latest
steps:
- - uses: actions-ecosystem/action-remove-labels@v1
+ - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1
with:
labels: |
status:triaged
diff --git a/.github/workflows/remove-pr-labels.yml b/.github/workflows/remove-pr-labels.yml
index 3aed6ced7..64aeeaa64 100644
--- a/.github/workflows/remove-pr-labels.yml
+++ b/.github/workflows/remove-pr-labels.yml
@@ -11,7 +11,7 @@ jobs:
pull-requests: write
runs-on: ubuntu-latest
steps:
- - uses: actions-ecosystem/action-remove-labels@v1
+ - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1
with:
labels: |
status:awaiting review
diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml
new file mode 100644
index 000000000..8a3dc6462
--- /dev/null
+++ b/.github/workflows/samples.yaml
@@ -0,0 +1,86 @@
+name: Validate samples
+
+on:
+ pull_request:
+ types: [opened, synchronize] # new, updates
+
+jobs:
+ update-python-list:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+
+ - name: Get Changed Files
+ id: changed_files
+ uses: tj-actions/changed-files@2f7c5bfce28377bc069a65ba478de0a74aa0ca32 # v44
+ with:
+ files: |
+ samples/*.py
+
+ - name: Check Python samples
+ env:
+ NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }}
+ README: samples/README.md
+ run: |
+ #!/bin/bash
+
+        for file in ${NEW_FILES}; do
+          echo "Testing $file"
+          name=$(basename "$file")
+          if [[ -f "$file" ]]; then
+            # File exists, so it needs to be listed.
+            if ! grep -q "$name" "$README"; then
+              echo "Error: Sample not listed in README ($name)"
+              exit 1
+            fi
+          else
+            # File does not exist, so ensure it's not listed.
+            if grep -q "$name" "$README"; then
+              echo "Error: Sample should not be listed in README ($name)"
+              exit 1
+            fi
+          fi
+        done
+
+ update-rest-list:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+
+ - name: Get Changed Files
+ id: changed_files
+ uses: tj-actions/changed-files@2f7c5bfce28377bc069a65ba478de0a74aa0ca32 # v44
+ with:
+ files: |
+ samples/rest/*.sh
+
+ - name: Check REST samples
+ env:
+ NEW_FILES: ${{ steps.changed_files.outputs.all_modified_files }}
+ README: samples/rest/README.md
+ run: |
+ #!/bin/bash
+
+        for file in ${NEW_FILES}; do
+          echo "Testing $file"
+          name=$(basename "$file")
+          if [[ -f "$file" ]]; then
+            # File exists, so it needs to be listed.
+            if ! grep -q "$name" "$README"; then
+              echo "Error: Sample not listed in README ($name)"
+              exit 1
+            fi
+          else
+            # File does not exist, so ensure it's not listed.
+            if grep -q "$name" "$README"; then
+              echo "Error: Sample should not be listed in README ($name)"
+              exit 1
+            fi
+          fi
+        done
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index df7d6c186..1f7710e5a 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -19,7 +19,7 @@ jobs:
pull-requests: write
steps:
- - uses: actions/stale@v5
+ - uses: actions/stale@f7176fd3007623b69d27091f9b9d4ab7995f0a06 # v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-issue-stale: 14
diff --git a/.github/workflows/test_pr.yaml b/.github/workflows/test_pr.yaml
index 362a53e49..35f7c8fea 100644
--- a/.github/workflows/test_pr.yaml
+++ b/.github/workflows/test_pr.yaml
@@ -19,8 +19,8 @@ jobs:
name: Test Py3.12
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.12'
- name: Run tests
@@ -32,8 +32,8 @@ jobs:
name: Test Py3.11
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.11'
- name: Run tests
@@ -45,8 +45,8 @@ jobs:
name: Test Py3.10
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.10'
- name: Run tests
@@ -58,8 +58,8 @@ jobs:
name: Test Py3.9
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.9'
- name: Run tests
@@ -71,8 +71,8 @@ jobs:
name: pytype 3.11
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.11'
- name: Run pytype
@@ -86,8 +86,8 @@ jobs:
name: Check format with black
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- - uses: actions/setup-python@v4
+ - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3
+ - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4
with:
python-version: '3.11'
- name: Check format
diff --git a/.github/workflows/user-input.yml b/.github/workflows/user-input.yml
new file mode 100644
index 000000000..0aa0d6512
--- /dev/null
+++ b/.github/workflows/user-input.yml
@@ -0,0 +1,16 @@
+name: Manage awaiting user response
+
+on:
+ issue_comment:
+ types: [created]
+ pull_request_review_comment:
+ types: [created]
+
+jobs:
+ remove_label:
+ runs-on: ubuntu-latest
+ if: "contains(github.event.issue.labels.*.name, 'status: awaiting user response')"
+ steps:
+      - uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1
+ with:
+ labels: "status: awaiting user response"
diff --git a/.gitignore b/.gitignore
index 10692be5c..72ac0ed80 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,7 +3,6 @@
/.idea/
/.pytype/
/build/
-/docs/api
*.egg-info
.DS_Store
__pycache__
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9415df2a8..0e4179149 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -93,10 +93,11 @@ pytest
Or to debug, use:
-```commandline
+```
pip install nose2
nose2 --debugger
+```
### Type checking
@@ -124,7 +125,6 @@ black .
python docs/build_docs.py
```
-
[setup]: https://cloud.google.com/nodejs/docs/setup
[projects]: https://console.cloud.google.com/project
[billing]: https://support.google.com/cloud/answer/6293499#enable-billing
diff --git a/README.md b/README.md
index c0200f5b2..83b05f704 100644
--- a/README.md
+++ b/README.md
@@ -1,51 +1,26 @@
-# Google AI Python SDK for the Gemini API
+# [Deprecated] Google AI Python SDK for the Gemini API
-[](https://badge.fury.io/py/google-generativeai)
-
-
+With Gemini 2.0, we took the chance to create a single unified SDK for all developers who want to use Google's GenAI models (Gemini, Veo, Imagen, etc.). As part of that process, we took all of the feedback from this SDK and what developers like about other SDKs in the ecosystem to create the [Google Gen AI SDK](https://github.com/googleapis/python-genai).
-The Google AI Python SDK is the easiest way for Python developers to build with the Gemini API. The Gemini API gives you access to Gemini [models](https://ai.google.dev/models/gemini) created by [Google DeepMind](https://deepmind.google/technologies/gemini/#introduction). Gemini models are built from the ground up to be multimodal, so you can reason seamlessly across text, images, and code.
+The full migration guide from the old SDK to new SDK is available in the [Gemini API docs](https://ai.google.dev/gemini-api/docs/migrate).
-## Get started with the Gemini API
-1. Go to [Google AI Studio](https://aistudio.google.com/).
-2. Login with your Google account.
-3. [Create](https://aistudio.google.com/app/apikey) an API key. Note that in Europe the free tier is not available.
-4. Try a Python SDK [quickstart](https://github.com/google-gemini/gemini-api-cookbook/blob/main/quickstarts/Prompting.ipynb) in the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/).
-5. For detailed instructions, try the
-[Python SDK tutorial](https://ai.google.dev/tutorials/python_quickstart) on [ai.google.dev](https://ai.google.dev).
+The Gemini API docs are fully updated to show examples of the new Google Gen AI SDK. We know how disruptive an SDK change can be and don't take this change lightly, but our goal is to create an extremely simple and clear path for developers to build with our models so it felt necessary to make this change.
-## Usage example
-See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/) or [ai.google.dev](https://ai.google.dev) for complete code.
+Thank you for building with Gemini and [let us know](https://discuss.ai.google.dev/c/gemini-api/4) if you need any help!
-1. Install from [PyPI](https://pypi.org/project/google-generativeai).
+**Please be advised that this repository is now considered legacy.** For the latest features, performance improvements, and active development, we strongly recommend migrating to the official **[Google Generative AI SDK for Python](https://github.com/googleapis/python-genai)**.
-`pip install -U google-generativeai`
+**Support Plan for this Repository:**
-2. Import the SDK and configure your API key.
+* **Limited Maintenance:** Development is now restricted to **critical bug fixes only**. No new features will be added.
+* **Purpose:** This limited support aims to provide stability for users while they transition to the new SDK.
+* **End-of-Life Date:** All support for this repository (including bug fixes) will permanently end on **November 30, 2025**.
-```python
-import google.generativeai as genai
-import os
+We encourage all users to begin planning their migration to the [Google Generative AI SDK](https://github.com/googleapis/python-genai) to ensure continued access to the latest capabilities and support.
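+
+For reference, here is a minimal sketch of the equivalent "hello world" call in the new SDK (the model name is illustrative; see the migration guide for details):
+
+```python
+from google import genai
+
+client = genai.Client()  # reads the GEMINI_API_KEY environment variable
+response = client.models.generate_content(
+    model="gemini-2.0-flash",  # illustrative model name
+    contents="The opposite of hot is",
+)
+print(response.text)
+```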
-genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
-```
-
-3. Create a model and run a prompt.
-
-```python
-model = genai.GenerativeModel('gemini-1.0-pro-latest')
-response = model.generate_content("The opposite of hot is")
-print(response.text)
-```
-
-## Documentation
-
-See the [Gemini API Cookbook](https://github.com/google-gemini/gemini-api-cookbook/) or [ai.google.dev](https://ai.google.dev) for complete documentation.
-
-## Contributing
-
-See [Contributing](https://github.com/google/generative-ai-python/blob/main/CONTRIBUTING.md) for more information on contributing to the Google AI Python SDK.
-
-## License
-
-The contents of this repository are licensed under the [Apache License, version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
+
diff --git a/docs/ais-templates/aistudio_gemini_prompt_freeform.ipynb b/docs/ais-templates/aistudio_gemini_prompt_freeform.ipynb
new file mode 100644
index 000000000..f53d3b2a5
--- /dev/null
+++ b/docs/ais-templates/aistudio_gemini_prompt_freeform.ipynb
@@ -0,0 +1,332 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Tce3stUlHN0L"
+ },
+ "source": [
+ "##### Copyright 2023 Google LLC"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "tuOe1ymfHZPu"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FKwyTRdwB8aW"
+ },
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RXInneX6xx7c"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q \"google-generativeai>=0.8.2\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "kWIuwKG2_oWE"
+ },
+ "outputs": [],
+ "source": [
+ "# import necessary modules.\n",
+ "import base64\n",
+ "import copy\n",
+ "import json\n",
+ "import pathlib\n",
+ "import requests\n",
+ "\n",
+ "\n",
+ "import PIL.Image\n",
+ "import IPython.display\n",
+ "from IPython.display import Markdown\n",
+ "\n",
+ "try:\n",
+ " # The SDK will automatically read it from the GOOGLE_API_KEY environment variable.\n",
+ " # In Colab get the key from Colab-secrets (\"🔑\" in the left panel).\n",
+ " import os\n",
+ " from google.colab import userdata\n",
+ "\n",
+ " os.environ[\"GOOGLE_API_KEY\"] = userdata.get(\"GOOGLE_API_KEY\")\n",
+ "except ImportError:\n",
+ " pass\n",
+ "\n",
+ "import google.generativeai as genai\n",
+ "\n",
+ "# Parse the arguments\n",
+ "\n",
+ "model = \"gemini-1.5-flash\" # @param {isTemplate: true}\n",
+ "contents_b64 = \"W3sicGFydHMiOiBbeyJ0ZXh0IjogIldoYXQncyBpbiB0aGlzIHBpY3R1cmU/In0sIHsiZmlsZV9kYXRhIjogeyJ1cmwiOiAiaHR0cHM6Ly9zdG9yYWdlLmdvb2dsZWFwaXMuY29tL2dlbmVyYXRpdmVhaS1kb3dubG9hZHMvaW1hZ2VzL3Njb25lcy5qcGciLCAibWltZV90eXBlIjogImltYWdlL2pwZWcifX1dfV0=\" # @param {isTemplate: true}\n",
+ "generation_config_b64 = \"e30=\" # @param {isTemplate: true}\n",
+ "safety_settings_b64 = \"e30=\" # @param {isTemplate: true}\n",
+ "\n",
+ "gais_contents = json.loads(base64.b64decode(contents_b64))\n",
+ "\n",
+ "generation_config = json.loads(base64.b64decode(generation_config_b64))\n",
+ "safety_settings = json.loads(base64.b64decode(safety_settings_b64))\n",
+ "\n",
+ "stream = False\n",
+ "\n",
+ "# Convert and upload the files\n",
+ "\n",
+ "tempfiles = pathlib.Path(f\"tempfiles\")\n",
+ "tempfiles.mkdir(parents=True, exist_ok=True)\n",
+ "\n",
+ "\n",
+ "drive = None\n",
+ "def upload_file_data(file_data, index):\n",
+ " \"\"\"Upload files to the Files API.\n",
+ "\n",
+        "    For each file, Google AI Studio sent either:\n",
+        "    - a Google Drive ID,\n",
+        "    - a URL,\n",
+        "    - a file path, or\n",
+        "    - the raw bytes (`inline_data`).\n",
+        "\n",
+        "    The API only understands `inline_data` or its Files API.\n",
+        "    This code uploads files to the Files API where the API can access them.\n",
+ " \"\"\"\n",
+ "\n",
+ " mime_type = file_data[\"mime_type\"]\n",
+ " if drive_id := file_data.pop(\"drive_id\", None):\n",
+ " if drive is None:\n",
+ " from google.colab import drive\n",
+ " drive.mount(\"/gdrive\")\n",
+ "\n",
+ " path = next(\n",
+ " pathlib.Path(f\"/gdrive/.shortcut-targets-by-id/{drive_id}\").glob(\"*\")\n",
+ " )\n",
+ " print(\"Uploading:\", str(path))\n",
+ " file_info = genai.upload_file(path=path, mime_type=mime_type)\n",
+ " file_data[\"file_uri\"] = file_info.uri\n",
+ " return\n",
+ "\n",
+ " if url := file_data.pop(\"url\", None):\n",
+ " response = requests.get(url)\n",
+ " data = response.content\n",
+ " name = url.split(\"/\")[-1]\n",
+ " path = tempfiles / str(index)\n",
+ " path.write_bytes(data)\n",
+ " print(\"Uploading:\", url)\n",
+ " file_info = genai.upload_file(path, display_name=name, mime_type=mime_type)\n",
+ " file_data[\"file_uri\"] = file_info.uri\n",
+ " return\n",
+ "\n",
+ " if name := file_data.get(\"filename\", None):\n",
+ " if not pathlib.Path(name).exists():\n",
+ " raise IOError(\n",
+ " f\"local file: `{name}` does not exist. You can upload files \"\n",
+ " 'to Colab using the file manager (\"📁 Files\" in the left '\n",
+ " \"toolbar)\"\n",
+ " )\n",
+        "        file_info = genai.upload_file(name, display_name=name, mime_type=mime_type)\n",
+ " file_data[\"file_uri\"] = file_info.uri\n",
+ " return\n",
+ "\n",
+ " if \"inline_data\" in file_data:\n",
+ " return\n",
+ "\n",
+        "    raise ValueError(\"Either `drive_id`, `url`, `filename` or `inline_data` must be provided.\")\n",
+ "\n",
+ "\n",
+ "contents = copy.deepcopy(gais_contents)\n",
+ "\n",
+ "index = 0\n",
+ "for content in contents:\n",
+ " for n, part in enumerate(content[\"parts\"]):\n",
+ " if file_data := part.get(\"file_data\", None):\n",
+ " upload_file_data(file_data, index)\n",
+ " index += 1\n",
+ "\n",
+ "import json\n",
+ "print(json.dumps(contents, indent=4))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "E7zAD69vE92b"
+ },
+ "source": [
+ "## Call `generate_content`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "LB2LxPmAB95V"
+ },
+ "outputs": [],
+ "source": [
+ "from IPython.display import display\n",
+ "from IPython.display import Markdown\n",
+ "\n",
+ "# Call the model and print the response.\n",
+ "gemini = genai.GenerativeModel(model_name=model)\n",
+ "\n",
+ "response = gemini.generate_content(\n",
+ " contents,\n",
+ " generation_config=generation_config,\n",
+ " safety_settings=safety_settings,\n",
+ " stream=stream,\n",
+ ")\n",
+ "\n",
+ "display(Markdown(response.text))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9c9d345e9868"
+ },
+ "source": [
+        ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "F91AeeGO1ncU"
+ },
+ "source": [
+ "## [optional] Show the conversation\n",
+ "\n",
+ "This section displays the conversation received from Google AI Studio."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "yoL3p3KPylFW"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Show the conversation, in colab.\n",
+ "import mimetypes\n",
+ "\n",
+ "def show_file(file_data):\n",
+ " mime_type = file_data[\"mime_type\"]\n",
+ "\n",
+ " if drive_id := file_data.get(\"drive_id\", None):\n",
+ " path = next(\n",
+ " pathlib.Path(f\"/gdrive/.shortcut-targets-by-id/{drive_id}\").glob(\"*\")\n",
+ " )\n",
+ " name = path\n",
+ " # data = path.read_bytes()\n",
+ " kwargs = {\"filename\": path}\n",
+ " elif url := file_data.get(\"url\", None):\n",
+ " name = url\n",
+ " kwargs = {\"url\": url}\n",
+ " # response = requests.get(url)\n",
+ " # data = response.content\n",
+ " elif data := file_data.get(\"inline_data\", None):\n",
+ " name = None\n",
+ " kwargs = {\"data\": data}\n",
+ " elif name := file_data.get(\"filename\", None):\n",
+ " if not pathlib.Path(name).exists():\n",
+ " raise IOError(\n",
+ " f\"local file: `{name}` does not exist. You can upload files to \"\n",
+        "            'Colab using the file manager (\"📁 Files\" in the left toolbar)'\n",
+ " )\n",
+ " else:\n",
+        "        raise ValueError(\"Either `drive_id`, `url`, `filename` or `inline_data` must be provided.\")\n",
+ "\n",
+ " print(f\"File:\\n name: {name}\\n mime_type: {mime_type}\\n\")\n",
+ " return\n",
+ "\n",
+ " format = mimetypes.guess_extension(mime_type).strip(\".\")\n",
+ " if mime_type.startswith(\"image/\"):\n",
+ " image = IPython.display.Image(**kwargs, width=256)\n",
+ " IPython.display.display(image)\n",
+ " print()\n",
+ " return\n",
+ "\n",
+ " if mime_type.startswith(\"audio/\"):\n",
+ " if len(data) < 2**12:\n",
+ " audio = IPython.display.Audio(**kwargs)\n",
+ " IPython.display.display(audio)\n",
+ " print()\n",
+ " return\n",
+ "\n",
+ " if mime_type.startswith(\"video/\"):\n",
+ " if len(data) < 2**12:\n",
+        "            video = IPython.display.Video(**kwargs, mimetype=mime_type)\n",
+        "            IPython.display.display(video)\n",
+ " print()\n",
+ " return\n",
+ "\n",
+ " print(f\"File:\\n name: {name}\\n mime_type: {mime_type}\\n\")\n",
+ "\n",
+ "\n",
+ "for content in gais_contents:\n",
+ " if role := content.get(\"role\", None):\n",
+ " print(\"Role:\", role, \"\\n\")\n",
+ "\n",
+ " for n, part in enumerate(content[\"parts\"]):\n",
+ " if text := part.get(\"text\", None):\n",
+ " print(text, \"\\n\")\n",
+ "\n",
+ " elif file_data := part.get(\"file_data\", None):\n",
+ " show_file(file_data)\n",
+ "\n",
+ " print(\"-\" * 80, \"\\n\")"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "Tce3stUlHN0L"
+ ],
+ "name": "aistudio_gemini_prompt_freeform.ipynb",
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/docs/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb b/docs/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb
new file mode 100644
index 000000000..5182f136b
--- /dev/null
+++ b/docs/ais-templates/aistudio_gemini_prompt_freeform_nofiles.ipynb
@@ -0,0 +1,167 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Tce3stUlHN0L"
+ },
+ "source": [
+ "##### Copyright 2023 Google LLC"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "tuOe1ymfHZPu"
+ },
+ "outputs": [],
+ "source": [
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# https://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FKwyTRdwB8aW"
+ },
+ "source": [
+ "## Setup"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RXInneX6xx7c"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install -U -q \"google-generativeai>=0.8.2\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "cellView": "form",
+ "id": "kWIuwKG2_oWE"
+ },
+ "outputs": [],
+ "source": [
+ "# import necessary modules.\n",
+ "\n",
+ "import google.generativeai as genai\n",
+ "\n",
+ "import base64\n",
+ "import json\n",
+ "\n",
+ "try:\n",
+ " # Mount google drive\n",
+ " from google.colab import drive\n",
+ "\n",
+ " drive.mount(\"/gdrive\")\n",
+ "\n",
+ " # The SDK will automatically read it from the GOOGLE_API_KEY environment variable.\n",
+ " # In Colab get the key from Colab-secrets (\"🔑\" in the left panel).\n",
+ " import os\n",
+ " from google.colab import userdata\n",
+ "\n",
+ " os.environ[\"GOOGLE_API_KEY\"] = userdata.get(\"GOOGLE_API_KEY\")\n",
+ "except ImportError:\n",
+ " pass\n",
+ "\n",
+ "# Parse the arguments\n",
+ "\n",
+ "model = \"gemini-1.5-flash\" # @param {isTemplate: true}\n",
+ "contents_b64 = b'W3sicGFydHMiOiBbeyJ0ZXh0IjogIkhlbGxvIn1dfV0='\n",
+ "generation_config_b64 = \"e30=\" # @param {isTemplate: true}\n",
+ "safety_settings_b64 = \"e30=\" # @param {isTemplate: true}\n",
+ "\n",
+ "contents = json.loads(base64.b64decode(contents_b64))\n",
+ "\n",
+ "generation_config = json.loads(base64.b64decode(generation_config_b64))\n",
+ "safety_settings = json.loads(base64.b64decode(safety_settings_b64))\n",
+ "\n",
+ "stream = False\n",
+ "\n",
+ "print(json.dumps(contents, indent=4))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "E7zAD69vE92b"
+ },
+ "source": [
+ "## Call `generate_content`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "LB2LxPmAB95V"
+ },
+ "outputs": [],
+ "source": [
+ "from IPython.display import display\n",
+ "from IPython.display import Markdown\n",
+ "\n",
+ "# Call the model and print the response.\n",
+ "gemini = genai.GenerativeModel(model_name=model)\n",
+ "\n",
+ "response = gemini.generate_content(\n",
+ " contents,\n",
+ " generation_config=generation_config,\n",
+ " safety_settings=safety_settings,\n",
+ " stream=stream,\n",
+ ")\n",
+ "\n",
+ "display(Markdown(response.text))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9c9d345e9868"
+ },
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [
+ "Tce3stUlHN0L"
+ ],
+ "name": "aistudio_gemini_prompt_freeform_nofiles.ipynb",
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/docs/api/google/generativeai.md b/docs/api/google/generativeai.md
new file mode 100644
index 000000000..5b3931f08
--- /dev/null
+++ b/docs/api/google/generativeai.md
@@ -0,0 +1,128 @@
+
+# Module: google.generativeai
+
+
+
+
+
+
+
+Google AI Python SDK
+
+
+
+## Setup
+
+```posix-terminal
+pip install google-generativeai
+```
+
+## GenerativeModel
+
+Use `genai.GenerativeModel` to access the API:
+
+```
+import google.generativeai as genai
+import os
+
+genai.configure(api_key=os.environ['API_KEY'])
+
+model = genai.GenerativeModel(model_name='gemini-1.5-flash')
+response = model.generate_content('Teach me about how an LLM works')
+
+print(response.text)
+```
+
+See the [python quickstart](https://ai.google.dev/tutorials/python_quickstart) for more details.
+
+## Modules
+
+[`caching`](../google/generativeai/caching.md) module
+
+[`protos`](../google/generativeai/protos.md) module: This module provides low-level access to the ProtoBuffer "Message" classes used by the API.
+
+[`types`](../google/generativeai/types.md) module: A collection of type definitions used throughout the library.
+
+## Classes
+
+[`class ChatSession`](../google/generativeai/ChatSession.md): Contains an ongoing conversation with the model.
+
+[`class GenerationConfig`](../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content.
+
+[`class GenerativeModel`](../google/generativeai/GenerativeModel.md): The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat.
+
+## Functions
+
+[`configure(...)`](../google/generativeai/configure.md): Captures default client configuration.
+
+[`create_tuned_model(...)`](../google/generativeai/create_tuned_model.md): Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
+
+[`delete_file(...)`](../google/generativeai/delete_file.md): Calls the API to permanently delete a specified file using a supported file service.
+
+[`delete_tuned_model(...)`](../google/generativeai/delete_tuned_model.md): Calls the API to delete a specified tuned model.
+
+[`embed_content(...)`](../google/generativeai/embed_content.md): Calls the API to create embeddings for content passed in.
+
+[`embed_content_async(...)`](../google/generativeai/embed_content_async.md): Calls the API to create async embeddings for content passed in.
+
+[`get_base_model(...)`](../google/generativeai/get_base_model.md): Calls the API to fetch a base model by name.
+
+[`get_file(...)`](../google/generativeai/get_file.md): Calls the API to retrieve a specified file using a supported file service.
+
+[`get_model(...)`](../google/generativeai/get_model.md): Calls the API to fetch a model by name.
+
+[`get_operation(...)`](../google/generativeai/get_operation.md): Calls the API to get a specific operation.
+
+[`get_tuned_model(...)`](../google/generativeai/get_tuned_model.md): Calls the API to fetch a tuned model by name.
+
+[`list_files(...)`](../google/generativeai/list_files.md): Calls the API to list files using a supported file service.
+
+[`list_models(...)`](../google/generativeai/list_models.md): Calls the API to list all available models.
+
+[`list_operations(...)`](../google/generativeai/list_operations.md): Calls the API to list all operations.
+
+[`list_tuned_models(...)`](../google/generativeai/list_tuned_models.md): Calls the API to list all tuned models.
+
+[`update_tuned_model(...)`](../google/generativeai/update_tuned_model.md): Calls the API to push updates to a specified tuned model where only certain attributes are updatable.
+
+[`upload_file(...)`](../google/generativeai/upload_file.md): Calls the API to upload a file using a supported file service.
+
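+As a quick illustration, a file uploaded with `upload_file` can be passed straight into a
+prompt (a minimal sketch; the file name is hypothetical):
+
+```
+import google.generativeai as genai
+
+# Upload a local file to the Files API, then reference it in a prompt.
+sample = genai.upload_file('sample.jpg')  # hypothetical local file
+model = genai.GenerativeModel('gemini-1.5-flash')
+response = model.generate_content([sample, 'Describe this image'])
+print(response.text)
+```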
+
+| Other Members | |
+| :--- | :--- |
+| `__version__` | `'0.8.3'` |
+| `annotations` | Instance of `__future__._Feature` |
diff --git a/docs/api/google/generativeai/ChatSession.md b/docs/api/google/generativeai/ChatSession.md
new file mode 100644
index 000000000..442c59e02
--- /dev/null
+++ b/docs/api/google/generativeai/ChatSession.md
@@ -0,0 +1,244 @@
+
+# google.generativeai.ChatSession
+
+
+
+
+
+
+
+Contains an ongoing conversation with the model.
+
+
+```
+google.generativeai.ChatSession(
+    model: GenerativeModel,
+    history: (Iterable[content_types.StrictContentType] | None) = None,
+    enable_automatic_function_calling: bool = False
+)
+```
+
+
+
+
+
+
+```
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hello")
+>>> print(response.text)
+>>> response = chat.send_message("Hello again")
+>>> print(response.text)
+>>> response = chat.send_message(...
+```
+
+This `ChatSession` object collects the messages sent and received, in its
+`ChatSession.history` attribute.
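+
+For example, after a few turns you can inspect the history directly (a minimal sketch):
+
+```
+>>> for message in chat.history:
+...     print(message.role, ':', message.parts[0].text)
+```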
+
+| Arguments | |
+| :--- | :--- |
+| `model` | The model to use in the chat. |
+| `history` | A chat history to initialize the object with. |
+
+| Attributes | |
+| :--- | :--- |
+| `history` | The chat history. |
+| `last` | Returns the last received `genai.GenerateContentResponse`. |
+
+
+## Methods
+
+### rewind
+
+
+```
+rewind() -> tuple[protos.Content, protos.Content]
+```
+
+
+Removes the last request/response pair from the chat history.
+
+
+### send_message
+
+
+```
+send_message(
+    content: content_types.ContentType,
+    *,
+    generation_config: generation_types.GenerationConfigType = None,
+    safety_settings: safety_types.SafetySettingOptions = None,
+    stream: bool = False,
+    tools: (content_types.FunctionLibraryType | None) = None,
+    tool_config: (content_types.ToolConfigType | None) = None,
+    request_options: (helper_types.RequestOptionsType | None) = None
+) -> generation_types.GenerateContentResponse
+```
+
+
+Sends the conversation history with the added message and returns the model's response.
+
+Appends the request and response to the conversation history.
+
+```
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hello")
+>>> print(response.text)
+"Hello! How can I assist you today?"
+>>> len(chat.history)
+2
+```
+
+Call it with `stream=True` to receive response chunks as they are generated:
+
+```
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Explain quantum physics", stream=True)
+>>> for chunk in response:
+... print(chunk.text, end='')
+```
+
+Once iteration over chunks is complete, the `response` and `ChatSession` are in states identical to the
+`stream=False` case. Some properties are not available until iteration is complete.
+
+Like GenerativeModel.generate_content, this method lets you override the model's `generation_config` and
+`safety_settings`.
+
+
+| Arguments | |
+| :--- | :--- |
+| `content` | The message contents. |
+| `generation_config` | Overrides for the model's generation config. |
+| `safety_settings` | Overrides for the model's safety settings. |
+| `stream` | If True, yield response chunks as they are generated. |
+
+
+### send_message_async
+
+
+```
+send_message_async(
+    content,
+    *,
+    generation_config=None,
+    safety_settings=None,
+    stream=False,
+    tools=None,
+    tool_config=None,
+    request_options=None
+)
+```
+
+
+The async version of ChatSession.send_message.
+
+
+
+
diff --git a/docs/api/google/generativeai/GenerativeModel.md b/docs/api/google/generativeai/GenerativeModel.md
new file mode 100644
index 000000000..f9b0ccb7d
--- /dev/null
+++ b/docs/api/google/generativeai/GenerativeModel.md
@@ -0,0 +1,478 @@
+
+# google.generativeai.GenerativeModel
+
+
+
+
+
+
+
+The `genai.GenerativeModel` class wraps default parameters for calls to GenerativeModel.generate_content, GenerativeModel.count_tokens, and GenerativeModel.start_chat.
+
+
+```
+google.generativeai.GenerativeModel(
+    model_name: str = 'gemini-1.5-flash-002',
+    safety_settings: (safety_types.SafetySettingOptions | None) = None,
+    generation_config: (generation_types.GenerationConfigType | None) = None,
+    tools: (content_types.FunctionLibraryType | None) = None,
+    tool_config: (content_types.ToolConfigType | None) = None,
+    system_instruction: (content_types.ContentType | None) = None
+)
+```
+
+
+
+
+
+
+This family of functionality is designed to support multi-turn conversations and multimodal
+requests. Which media types are supported for input and output is model-dependent.
+
+```
+>>> import google.generativeai as genai
+>>> import PIL.Image
+>>> genai.configure(api_key='YOUR_API_KEY')
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
+>>> result = model.generate_content('Tell me a story about a magic backpack')
+>>> result.text
+"In the quaint little town of Lakeside, there lived a young girl named Lily..."
+```
+
+#### Multimodal input:
+
+
+
+```
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
+>>> result = model.generate_content([
+... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
+>>> result.text
+"**Blueberry Scones** ..."
+```
+
+Multi-turn conversation:
+
+```
+>>> chat = model.start_chat()
+>>> response = chat.send_message("Hi, I have some questions for you.")
+>>> response.text
+"Sure, I'll do my best to answer your questions..."
+```
+
+To list the compatible model names use:
+
+```
+>>> for m in genai.list_models():
+... if 'generateContent' in m.supported_generation_methods:
+... print(m.name)
+```
+
+
+
+| Arguments | |
+| :--- | :--- |
+| `model_name` | The name of the model to query. To list compatible models use `genai.list_models`. |
+| `safety_settings` | Sets the default safety filters. This controls which content is blocked by the API before being returned. |
+| `generation_config` | A `genai.GenerationConfig` setting the default generation parameters to use. |
+
+
+
+
+| Attributes | |
+| :--- | :--- |
+| `cached_content` | |
+| `model_name` | |
+
+## Methods
+
+### count_tokens
+
+
+```
+count_tokens(
+    contents: content_types.ContentsType = None,
+    *,
+    generation_config: (generation_types.GenerationConfigType | None) = None,
+    safety_settings: (safety_types.SafetySettingOptions | None) = None,
+    tools: (content_types.FunctionLibraryType | None) = None,
+    tool_config: (content_types.ToolConfigType | None) = None,
+    request_options: (helper_types.RequestOptionsType | None) = None
+) -> protos.CountTokensResponse
+```
+
+
+
+
+
+### count_tokens_async
+
+
+```
+count_tokens_async(
+    contents=None,
+    *,
+    generation_config=None,
+    safety_settings=None,
+    tools=None,
+    tool_config=None,
+    request_options=None
+)
+```
+
+
+
+
+
+### from_cached_content
+
+
+```
+@classmethod
+from_cached_content(
+    cached_content: (str | caching.CachedContent),
+    *,
+    generation_config: (generation_types.GenerationConfigType | None) = None,
+    safety_settings: (safety_types.SafetySettingOptions | None) = None
+) -> GenerativeModel
+```
+
+
+Creates a model with `cached_content` as the model's context.
+
+
+
+
+| Args | |
+| :--- | :--- |
+| `cached_content` | Context for the model. |
+| `generation_config` | Overrides for the model's generation config. |
+| `safety_settings` | Overrides for the model's safety settings. |
+
+| Returns |
+| :--- |
+| A `GenerativeModel` object with `cached_content` as its context. |
+
+
+### generate_content
+
+
+```
+generate_content(
+    contents: content_types.ContentsType,
+    *,
+    generation_config: (generation_types.GenerationConfigType | None) = None,
+    safety_settings: (safety_types.SafetySettingOptions | None) = None,
+    stream: bool = False,
+    tools: (content_types.FunctionLibraryType | None) = None,
+    tool_config: (content_types.ToolConfigType | None) = None,
+    request_options: (helper_types.RequestOptionsType | None) = None
+) -> generation_types.GenerateContentResponse
+```
+
+
+A multipurpose function to generate responses from the model.
+
+This GenerativeModel.generate_content method can handle multimodal input and multi-turn
+conversations.
+
+```
+>>> model = genai.GenerativeModel('models/gemini-1.5-flash')
+>>> response = model.generate_content('Tell me a story about a magic backpack')
+>>> response.text
+```
+
+### Streaming
+
+This method supports streaming with `stream=True`. The result has the same type as the non-streaming case,
+but you can iterate over the response chunks as they become available:
+
+```
+>>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)
+>>> for chunk in response:
+... print(chunk.text)
+```
+
+### Multi-turn
+
+This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each
+request. This takes some manual management but gives you complete control:
+
+```
+>>> messages = [{'role':'user', 'parts': ['hello']}]
+>>> response = model.generate_content(messages) # "Hello, how can I help"
+>>> messages.append(response.candidates[0].content)
+>>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})
+>>> response = model.generate_content(messages)
+```
+
+For a simpler multi-turn interface see GenerativeModel.start_chat.
+
+### Input type flexibility
+
+While the underlying API strictly expects a `list[protos.Content]`, this method
+will convert the user input into the correct type. The hierarchy of types that can be
+converted is below. Any of these objects can be passed as an equivalent `dict`.
+
+* `Iterable[protos.Content]`
+* `protos.Content`
+* `Iterable[protos.Part]`
+* `protos.Part`
+* `str`, `Image`, or `protos.Blob`
+
+In an `Iterable[protos.Content]` each `content` is a separate message.
+But note that an `Iterable[protos.Part]` is taken as the parts of a single message.
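+
+For example, each of these is an equivalent single-message prompt (a minimal sketch):
+
+```
+>>> model.generate_content('hello')
+>>> model.generate_content({'role': 'user', 'parts': ['hello']})
+>>> model.generate_content([{'role': 'user', 'parts': [{'text': 'hello'}]}])
+```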
+
+
+| Arguments | |
+| :--- | :--- |
+| `contents` | The contents serving as the model's prompt. |
+| `generation_config` | Overrides for the model's generation config. |
+| `safety_settings` | Overrides for the model's safety settings. |
+| `stream` | If True, yield response chunks as they are generated. |
+| `tools` | `protos.Tools`; more info coming soon. |
+| `request_options` | Options for the request. |
+
+
+### generate_content_async
+
+
+```
+generate_content_async(
+    contents,
+    *,
+    generation_config=None,
+    safety_settings=None,
+    stream=False,
+    tools=None,
+    tool_config=None,
+    request_options=None
+)
+```
+
+
+The async version of GenerativeModel.generate_content.
+
+
+### start_chat
+
+
+```
+start_chat(
+    *,
+    history: (Iterable[content_types.StrictContentType] | None) = None,
+    enable_automatic_function_calling: bool = False
+) -> ChatSession
+```
+
+
+Returns a `genai.ChatSession` attached to this model.
+
+```
+>>> model = genai.GenerativeModel()
+>>> chat = model.start_chat(history=[...])
+>>> response = chat.send_message("Hello?")
+```
+
+
+
+| Arguments | |
+| :--- | :--- |
+| `history` | An iterable of `protos.Content` objects, or equivalents, to initialize the session. |
+
+
+
+
diff --git a/docs/api/google/generativeai/_api_cache.json b/docs/api/google/generativeai/_api_cache.json
new file mode 100644
index 000000000..518937a44
--- /dev/null
+++ b/docs/api/google/generativeai/_api_cache.json
@@ -0,0 +1,10387 @@
+{
+ "duplicate_of": {
+ "google.generativeai.ChatSession.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.ChatSession.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.ChatSession.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.ChatSession.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.ChatSession.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.ChatSession.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.ChatSession.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.GenerationConfig": "google.generativeai.types.GenerationConfig",
+ "google.generativeai.GenerationConfig.__eq__": "google.generativeai.types.GenerationConfig.__eq__",
+ "google.generativeai.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.GenerationConfig.__init__": "google.generativeai.types.GenerationConfig.__init__",
+ "google.generativeai.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.GenerativeModel.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.GenerativeModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.GenerativeModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.GenerativeModel.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.GenerativeModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.GenerativeModel.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.GenerativeModel.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.annotations": "google.generativeai.caching.annotations",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.AttributionSourceId.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.AttributionSourceId.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.AttributionSourceId.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.AttributionSourceId.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.AttributionSourceId.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.AttributionSourceId.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.AttributionSourceId.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.AttributionSourceId.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchCreateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchCreateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchDeleteChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchEmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchUpdateChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.BatchUpdateChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Blob.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Blob.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Blob.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Blob.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Blob.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Blob.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Blob.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Blob.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CachedContent.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CachedContent.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CachedContent.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CachedContent.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CachedContent.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CachedContent.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CachedContent.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CachedContent.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CachedContent.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Candidate.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Chunk.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Chunk.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Chunk.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Chunk.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Chunk.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Chunk.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Chunk.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Chunk.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Chunk.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Chunk.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Chunk.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Chunk.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Chunk.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Chunk.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Chunk.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Chunk.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Chunk.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Chunk.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Chunk.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Chunk.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Chunk.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Chunk.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Chunk.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Chunk.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Chunk.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Chunk.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Chunk.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Chunk.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Chunk.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Chunk.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Chunk.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Chunk.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Chunk.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Chunk.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Chunk.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Chunk.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Chunk.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Chunk.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Chunk.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Chunk.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Chunk.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Chunk.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Chunk.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Chunk.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.Chunk.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Chunk.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Chunk.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Chunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Chunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Chunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Chunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Chunk.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Chunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Chunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Chunk.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ChunkData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ChunkData.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ChunkData.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ChunkData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ChunkData.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ChunkData.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ChunkData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ChunkData.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CitationMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CitationMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CitationMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CitationMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CitationMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CitationMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CitationMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CitationMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CitationSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CitationSource.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CitationSource.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CitationSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CitationSource.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CitationSource.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CitationSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CitationSource.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CodeExecution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CodeExecution.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CodeExecution.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CodeExecution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CodeExecution.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CodeExecution.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CodeExecution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CodeExecution.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.CodeExecutionResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CodeExecutionResult.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CodeExecutionResult.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CodeExecutionResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CodeExecutionResult.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CodeExecutionResult.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CodeExecutionResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CodeExecutionResult.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Condition.Operator.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Condition.Operator.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Condition.Operator.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Condition.Operator.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Condition.Operator.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Condition.Operator.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Condition.Operator.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Condition.Operator.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Condition.Operator.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Condition.Operator.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Condition.Operator.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Condition.Operator.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Condition.Operator.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Condition.Operator.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Condition.Operator.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Condition.Operator.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Condition.Operator.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Condition.Operator.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Condition.Operator.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Condition.Operator.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Condition.Operator.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Condition.Operator.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Condition.Operator.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Condition.Operator.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Condition.Operator.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Condition.Operator.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Condition.Operator.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Condition.Operator.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Condition.Operator.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Condition.Operator.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Condition.Operator.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Condition.Operator.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Condition.Operator.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Condition.Operator.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Condition.Operator.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Condition.Operator.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Condition.Operator.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Condition.Operator.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Condition.Operator.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Condition.Operator.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Condition.Operator.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Condition.Operator.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Condition.Operator.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Condition.Operator.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.Condition.Operator.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Condition.Operator.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Condition.Operator.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Condition.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Condition.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Condition.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Condition.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Condition.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Condition.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Condition.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Condition.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Content.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Content.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Content.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Content.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Content.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Content.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Content.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Content.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ContentEmbedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ContentEmbedding.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ContentEmbedding.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ContentEmbedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ContentEmbedding.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ContentEmbedding.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ContentEmbedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ContentEmbedding.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ContentFilter.BlockedReason": "google.generativeai.types.BlockedReason",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.ContentFilter.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.ContentFilter.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.ContentFilter.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.ContentFilter.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.ContentFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ContentFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ContentFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ContentFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ContentFilter.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ContentFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ContentFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ContentFilter.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Corpus.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Corpus.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Corpus.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Corpus.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Corpus.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Corpus.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Corpus.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Corpus.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountMessageTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountMessageTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountMessageTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountMessageTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountMessageTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountMessageTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountMessageTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountMessageTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountMessageTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountMessageTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountMessageTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountMessageTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountMessageTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountMessageTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountMessageTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountMessageTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountTextTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTextTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountTextTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountTextTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTextTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountTextTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountTextTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTextTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountTextTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTextTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountTextTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountTextTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTextTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountTextTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountTextTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTextTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountTokensRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTokensRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountTokensRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountTokensRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTokensRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountTokensRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountTokensRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTokensRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CountTokensResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CountTokensResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CountTokensResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CountTokensResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CountTokensResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CountTokensResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CountTokensResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CountTokensResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateFileResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateFileResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateFileResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateFileResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateFileResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateFileResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateFileResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateFileResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateTunedModelMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CreateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CreateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CreateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CreateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CreateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CreateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CreateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CreateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.CustomMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.CustomMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.CustomMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.CustomMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.CustomMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.CustomMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.CustomMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.CustomMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Dataset.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Dataset.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Dataset.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Dataset.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Dataset.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Dataset.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Dataset.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Dataset.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeletePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeletePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeletePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeletePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeletePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeletePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeletePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeletePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DeleteTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Document.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Document.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Document.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Document.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Document.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Document.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Document.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Document.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.DynamicRetrievalConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.DynamicRetrievalConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.EmbedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.EmbedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.EmbedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.EmbedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.EmbedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.EmbedContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.EmbedContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.EmbedContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.EmbedContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.EmbedContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.EmbedTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.EmbedTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.EmbedTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.EmbedTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.EmbedTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.EmbedTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.EmbedTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.EmbedTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.EmbedTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.EmbedTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.EmbedTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.EmbedTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.EmbedTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Embedding.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Embedding.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Embedding.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Embedding.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Embedding.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Embedding.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Embedding.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Embedding.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Example.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Example.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Example.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Example.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Example.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Example.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Example.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Example.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ExecutableCode.Language.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.ExecutableCode.Language.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.ExecutableCode.Language.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.ExecutableCode.Language.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.ExecutableCode.Language.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.ExecutableCode.Language.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.ExecutableCode.Language.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.ExecutableCode.Language.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.ExecutableCode.Language.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.ExecutableCode.Language.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.ExecutableCode.Language.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.ExecutableCode.Language.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.ExecutableCode.Language.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.ExecutableCode.Language.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.ExecutableCode.Language.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.ExecutableCode.Language.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.ExecutableCode.Language.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.ExecutableCode.Language.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.ExecutableCode.Language.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.ExecutableCode.Language.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.ExecutableCode.Language.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.ExecutableCode.Language.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.ExecutableCode.Language.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.ExecutableCode.Language.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.ExecutableCode.Language.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.ExecutableCode.Language.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.ExecutableCode.Language.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.ExecutableCode.Language.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.ExecutableCode.Language.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.ExecutableCode.Language.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.ExecutableCode.Language.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.ExecutableCode.Language.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.ExecutableCode.Language.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.ExecutableCode.Language.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.ExecutableCode.Language.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.ExecutableCode.Language.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.ExecutableCode.Language.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.ExecutableCode.Language.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.ExecutableCode.Language.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.ExecutableCode.Language.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.ExecutableCode.Language.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.ExecutableCode.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ExecutableCode.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ExecutableCode.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ExecutableCode.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ExecutableCode.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ExecutableCode.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ExecutableCode.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ExecutableCode.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.File.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.File.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.File.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.File.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.File.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.File.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.File.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.File.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.File.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.File.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.File.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.File.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.File.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.File.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.File.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.File.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.File.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.File.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.File.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.File.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.File.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.File.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.File.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.File.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.File.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.File.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.File.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.File.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.File.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.File.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.File.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.File.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.File.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.File.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.File.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.File.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.File.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.File.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.File.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.File.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.File.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.File.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.File.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.File.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.File.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.File.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.File.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.File.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.File.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.File.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.File.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.File.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.File.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.File.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.File.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.FileData.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FileData.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.FileData.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.FileData.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FileData.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.FileData.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.FileData.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FileData.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.FunctionCall.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionCall.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.FunctionCall.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.FunctionCall.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionCall.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.FunctionCall.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.FunctionCall.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionCall.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.FunctionCallingConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionCallingConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.FunctionCallingConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.FunctionCallingConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionCallingConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.FunctionCallingConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.FunctionCallingConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionCallingConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.FunctionDeclaration.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.FunctionDeclaration.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.FunctionDeclaration.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.FunctionResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.FunctionResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.FunctionResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.FunctionResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.FunctionResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.FunctionResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.FunctionResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.FunctionResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateAnswerRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateAnswerRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateAnswerRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateAnswerRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateAnswerRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateAnswerResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateAnswerResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateAnswerResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateAnswerResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateAnswerResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateAnswerResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateAnswerResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateAnswerResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateContentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateContentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateContentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateMessageRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateMessageRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateMessageRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateMessageRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateMessageRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateMessageRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateMessageRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateMessageRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateMessageResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateMessageResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateMessageResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateMessageResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateMessageResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateMessageResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateMessageResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateMessageResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateTextRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateTextRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateTextRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateTextRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateTextRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateTextRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateTextRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateTextRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerateTextResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerateTextResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerateTextResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerateTextResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerateTextResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerateTextResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerateTextResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerateTextResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GenerationConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GenerationConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GenerationConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetFileRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetFileRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetFileRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetFileRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetFileRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetFileRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetFileRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetFileRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetPermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetPermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetPermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetPermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetPermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetPermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetPermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetPermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GetTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GetTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GetTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GetTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GetTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GetTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GetTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GetTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GoogleSearchRetrieval.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingAttribution.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingAttribution.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingAttribution.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingAttribution.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingAttribution.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingAttribution.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingAttribution.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingAttribution.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingChunk.Web.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingChunk.Web.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingChunk.Web.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingChunk.Web.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingChunk.Web.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingChunk.Web.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingChunk.Web.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingChunk.Web.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingPassage.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingPassage.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingPassage.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingPassage.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingPassage.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingPassage.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingPassage.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingPassage.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingPassages.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingPassages.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingPassages.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingPassages.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingPassages.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingPassages.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingPassages.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingPassages.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.GroundingSupport.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.GroundingSupport.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.GroundingSupport.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.GroundingSupport.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.GroundingSupport.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.GroundingSupport.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.GroundingSupport.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.GroundingSupport.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Hyperparameters.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Hyperparameters.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Hyperparameters.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Hyperparameters.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Hyperparameters.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Hyperparameters.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Hyperparameters.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Hyperparameters.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListCachedContentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCachedContentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListCachedContentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListCachedContentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCachedContentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListCachedContentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListCachedContentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCachedContentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListCachedContentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCachedContentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListCachedContentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListCachedContentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCachedContentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListCachedContentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListCachedContentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCachedContentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListChunksRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListChunksRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListChunksRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListChunksRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListChunksRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListChunksRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListChunksRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListChunksRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListChunksResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListChunksResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListChunksResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListChunksResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListChunksResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListChunksResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListChunksResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListChunksResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListCorporaRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCorporaRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListCorporaRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListCorporaRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCorporaRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListCorporaRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListCorporaRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCorporaRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListCorporaResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListCorporaResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListCorporaResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListCorporaResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListCorporaResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListCorporaResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListCorporaResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListCorporaResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListDocumentsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListDocumentsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListDocumentsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListDocumentsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListDocumentsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListDocumentsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListDocumentsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListDocumentsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListDocumentsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListDocumentsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListDocumentsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListDocumentsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListDocumentsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListDocumentsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListDocumentsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListDocumentsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListFilesRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListFilesRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListFilesRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListFilesRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListFilesRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListFilesRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListFilesRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListFilesRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListFilesResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListFilesResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListFilesResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListFilesResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListFilesResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListFilesResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListFilesResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListFilesResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListPermissionsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListPermissionsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListPermissionsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListPermissionsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListPermissionsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListPermissionsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListPermissionsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListPermissionsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListPermissionsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListPermissionsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListPermissionsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListPermissionsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListPermissionsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListPermissionsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListPermissionsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListPermissionsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListTunedModelsRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListTunedModelsRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListTunedModelsRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListTunedModelsRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListTunedModelsRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListTunedModelsRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListTunedModelsRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListTunedModelsRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ListTunedModelsResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ListTunedModelsResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ListTunedModelsResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ListTunedModelsResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ListTunedModelsResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ListTunedModelsResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ListTunedModelsResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ListTunedModelsResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.LogprobsResult.Candidate.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.LogprobsResult.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.LogprobsResult.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.LogprobsResult.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.LogprobsResult.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.LogprobsResult.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.LogprobsResult.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.LogprobsResult.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.LogprobsResult.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Message.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Message.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Message.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Message.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Message.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Message.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Message.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Message.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.MessagePrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.MessagePrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.MessagePrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.MessagePrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.MessagePrompt.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.MessagePrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.MessagePrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.MessagePrompt.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.MetadataFilter.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.MetadataFilter.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.MetadataFilter.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.MetadataFilter.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.MetadataFilter.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.MetadataFilter.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.MetadataFilter.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.MetadataFilter.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Model.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Model.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Model.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Model.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Model.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Part.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Part.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Part.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Part.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Part.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Part.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Part.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Part.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Permission.GranteeType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Permission.GranteeType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Permission.GranteeType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Permission.GranteeType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Permission.GranteeType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Permission.GranteeType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Permission.GranteeType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Permission.GranteeType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Permission.GranteeType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Permission.GranteeType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Permission.GranteeType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Permission.GranteeType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Permission.GranteeType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Permission.GranteeType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Permission.GranteeType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Permission.GranteeType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Permission.GranteeType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Permission.GranteeType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Permission.GranteeType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Permission.GranteeType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Permission.GranteeType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Permission.GranteeType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Permission.GranteeType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Permission.GranteeType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Permission.GranteeType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Permission.GranteeType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Permission.GranteeType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Permission.GranteeType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Permission.GranteeType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Permission.GranteeType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Permission.GranteeType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Permission.GranteeType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Permission.GranteeType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Permission.GranteeType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Permission.GranteeType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Permission.GranteeType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Permission.GranteeType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Permission.GranteeType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Permission.GranteeType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Permission.GranteeType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.Permission.GranteeType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Permission.GranteeType.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Permission.GranteeType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Permission.Role.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Permission.Role.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Permission.Role.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Permission.Role.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Permission.Role.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Permission.Role.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Permission.Role.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Permission.Role.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Permission.Role.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Permission.Role.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Permission.Role.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Permission.Role.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Permission.Role.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Permission.Role.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Permission.Role.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Permission.Role.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Permission.Role.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Permission.Role.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Permission.Role.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Permission.Role.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Permission.Role.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Permission.Role.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Permission.Role.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Permission.Role.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Permission.Role.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Permission.Role.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Permission.Role.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Permission.Role.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Permission.Role.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Permission.Role.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Permission.Role.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Permission.Role.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Permission.Role.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Permission.Role.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Permission.Role.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Permission.Role.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Permission.Role.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Permission.Role.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Permission.Role.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Permission.Role.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Permission.Role.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Permission.Role.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Permission.Role.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Permission.Role.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.Permission.Role.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Permission.Role.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Permission.Role.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.Permission.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Permission.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Permission.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Permission.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Permission.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.PredictRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.PredictRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.PredictRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.PredictRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.PredictRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.PredictRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.PredictRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.PredictRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.PredictResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.PredictResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.PredictResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.PredictResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.PredictResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.PredictResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.PredictResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.PredictResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.QueryCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.QueryCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.QueryCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.QueryCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.QueryCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.QueryCorpusResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryCorpusResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.QueryCorpusResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.QueryCorpusResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryCorpusResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.QueryCorpusResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.QueryCorpusResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryCorpusResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.QueryDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.QueryDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.QueryDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.QueryDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.QueryDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.QueryDocumentResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.QueryDocumentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.QueryDocumentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.QueryDocumentResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.QueryDocumentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.QueryDocumentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.QueryDocumentResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.QueryDocumentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.RelevantChunk.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.RelevantChunk.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.RelevantChunk.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.RelevantChunk.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.RelevantChunk.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.RelevantChunk.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.RelevantChunk.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.RelevantChunk.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.RetrievalMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.RetrievalMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.RetrievalMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.RetrievalMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.RetrievalMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.RetrievalMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.RetrievalMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.RetrievalMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.SafetyFeedback.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetyFeedback.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.SafetyFeedback.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.SafetyFeedback.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetyFeedback.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.SafetyFeedback.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.SafetyFeedback.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetyFeedback.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.SafetyRating.HarmProbability": "google.generativeai.types.HarmProbability",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.SafetyRating.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.SafetyRating.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.SafetyRating.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.SafetyRating.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.SafetyRating.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetyRating.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.SafetyRating.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.SafetyRating.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetyRating.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.SafetyRating.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.SafetyRating.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetyRating.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold": "google.generativeai.types.HarmBlockThreshold",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.SafetySetting.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SafetySetting.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.SafetySetting.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.SafetySetting.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SafetySetting.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.SafetySetting.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.SafetySetting.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SafetySetting.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Schema.PropertiesEntry.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Schema.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Schema.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Schema.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Schema.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Schema.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Schema.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Schema.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Schema.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.SearchEntryPoint.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SearchEntryPoint.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.SearchEntryPoint.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.SearchEntryPoint.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SearchEntryPoint.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.SearchEntryPoint.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.SearchEntryPoint.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SearchEntryPoint.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Segment.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Segment.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Segment.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Segment.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Segment.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Segment.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Segment.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Segment.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.SemanticRetrieverConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.StringList.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.StringList.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.StringList.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.StringList.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.StringList.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.StringList.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.StringList.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.StringList.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TaskType.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.TaskType.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.TaskType.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.TaskType.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.TaskType.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.TaskType.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.TaskType.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.TaskType.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.TaskType.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.TaskType.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.TaskType.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.TaskType.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.TaskType.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.TaskType.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.TaskType.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.TaskType.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.TaskType.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.TaskType.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.TaskType.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.TaskType.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.TaskType.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.TaskType.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.TaskType.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.TaskType.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.TaskType.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.TaskType.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.TaskType.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.TaskType.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.TaskType.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.TaskType.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.TaskType.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.TaskType.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.TaskType.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.TaskType.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.TaskType.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.TaskType.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.TaskType.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.TaskType.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.TaskType.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.TaskType.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.TaskType.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.TaskType.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.TaskType.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.TaskType.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.TaskType.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.TaskType.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.TaskType.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.TextCompletion.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TextCompletion.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TextCompletion.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TextCompletion.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TextCompletion.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TextCompletion.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TextCompletion.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TextCompletion.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TextPrompt.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TextPrompt.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TextPrompt.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TextPrompt.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TextPrompt.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TextPrompt.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TextPrompt.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TextPrompt.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Tool.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.Tool.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.Tool.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.Tool.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.Tool.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.Tool.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.ToolConfig.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.ToolConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.ToolConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.ToolConfig.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.ToolConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.ToolConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.ToolConfig.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.ToolConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TransferOwnershipRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TransferOwnershipRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TransferOwnershipRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TransferOwnershipRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TransferOwnershipRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TransferOwnershipRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TransferOwnershipRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TransferOwnershipRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TransferOwnershipResponse.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TransferOwnershipResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TransferOwnershipResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TransferOwnershipResponse.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TransferOwnershipResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TransferOwnershipResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TransferOwnershipResponse.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TransferOwnershipResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TunedModel.State": "google.generativeai.types.TunedModelState",
+ "google.generativeai.protos.TunedModel.State.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.TunedModel.State.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.TunedModel.State.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.TunedModel.State.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.TunedModel.State.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.TunedModel.State.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.TunedModel.State.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.TunedModel.State.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.TunedModel.State.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.TunedModel.State.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.TunedModel.State.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.TunedModel.State.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.TunedModel.State.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.TunedModel.State.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.TunedModel.State.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.TunedModel.State.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.TunedModel.State.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.TunedModel.State.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.TunedModel.State.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.TunedModel.State.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.TunedModel.State.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.TunedModel.State.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.TunedModel.State.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.TunedModel.State.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.TunedModel.State.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.TunedModel.State.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.TunedModel.State.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.TunedModel.State.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.TunedModel.State.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.TunedModel.State.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.TunedModel.State.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.TunedModel.State.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.TunedModel.State.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.TunedModel.State.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.TunedModel.State.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.TunedModel.State.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.TunedModel.State.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.TunedModel.State.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.TunedModel.State.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.TunedModel.State.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.TunedModel.State.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.TunedModel.State.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.TunedModel.State.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.TunedModel.State.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.TunedModel.State.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.TunedModel.State.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.TunedModel.State.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.TunedModel.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TunedModel.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TunedModel.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TunedModelSource.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TunedModelSource.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TunedModelSource.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TunedModelSource.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TunedModelSource.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TunedModelSource.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TunedModelSource.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TunedModelSource.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TuningExample.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningExample.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TuningExample.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TuningExample.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningExample.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TuningExample.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TuningExample.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningExample.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TuningExamples.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningExamples.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TuningExamples.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TuningExamples.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningExamples.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TuningExamples.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TuningExamples.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningExamples.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TuningSnapshot.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningSnapshot.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TuningSnapshot.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TuningSnapshot.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningSnapshot.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TuningSnapshot.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TuningSnapshot.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningSnapshot.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.TuningTask.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.TuningTask.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.TuningTask.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.TuningTask.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.TuningTask.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.TuningTask.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.TuningTask.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.TuningTask.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.Type.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.protos.Type.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.protos.Type.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.protos.Type.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.protos.Type.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.protos.Type.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.protos.Type.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.protos.Type.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.protos.Type.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.protos.Type.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.protos.Type.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.protos.Type.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.protos.Type.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.protos.Type.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.protos.Type.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.protos.Type.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.protos.Type.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.protos.Type.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.protos.Type.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.protos.Type.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.protos.Type.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.protos.Type.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.protos.Type.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.protos.Type.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.protos.Type.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.protos.Type.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.protos.Type.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.protos.Type.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.protos.Type.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.protos.Type.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.protos.Type.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.protos.Type.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.protos.Type.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.protos.Type.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.protos.Type.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.protos.Type.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.protos.Type.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.protos.Type.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.protos.Type.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.protos.Type.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.protos.Type.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.protos.Type.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.protos.Type.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.protos.Type.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.protos.Type.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.protos.Type.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.protos.Type.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.protos.UpdateCachedContentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateCachedContentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.UpdateChunkRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateChunkRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdateChunkRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdateChunkRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateChunkRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdateChunkRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdateChunkRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateChunkRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.UpdateCorpusRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateCorpusRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdateCorpusRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdateCorpusRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateCorpusRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdateCorpusRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdateCorpusRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateCorpusRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.UpdateDocumentRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateDocumentRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdateDocumentRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdateDocumentRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateDocumentRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdateDocumentRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdateDocumentRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateDocumentRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.UpdatePermissionRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdatePermissionRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdatePermissionRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdatePermissionRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdatePermissionRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdatePermissionRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdatePermissionRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdatePermissionRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.UpdateTunedModelRequest.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.protos.VideoMetadata.__eq__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__",
+ "google.generativeai.protos.VideoMetadata.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.protos.VideoMetadata.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.protos.VideoMetadata.__init__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__",
+ "google.generativeai.protos.VideoMetadata.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.protos.VideoMetadata.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.protos.VideoMetadata.__ne__": "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__",
+ "google.generativeai.protos.VideoMetadata.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.AsyncGenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.BlockedPromptException.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.BlockedPromptException.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.BlockedPromptException.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.BlockedPromptException.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.BlockedPromptException.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.BlockedPromptException.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.BlockedReason.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.BlockedReason.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.BlockedReason.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.BlockedReason.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.BlockedReason.__contains__": "google.generativeai.protos.ContentFilter.BlockedReason.__contains__",
+ "google.generativeai.types.BlockedReason.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.BlockedReason.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.BlockedReason.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.BlockedReason.__getitem__": "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__",
+ "google.generativeai.types.BlockedReason.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.BlockedReason.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.BlockedReason.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.BlockedReason.__iter__": "google.generativeai.protos.ContentFilter.BlockedReason.__iter__",
+ "google.generativeai.types.BlockedReason.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.BlockedReason.__len__": "google.generativeai.protos.ContentFilter.BlockedReason.__len__",
+ "google.generativeai.types.BlockedReason.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.BlockedReason.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.BlockedReason.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.BlockedReason.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.BlockedReason.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.BlockedReason.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.BlockedReason.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.BlockedReason.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.BlockedReason.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.BlockedReason.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.BlockedReason.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.BlockedReason.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.BlockedReason.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.BlockedReason.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.BlockedReason.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.BlockedReason.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.BlockedReason.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.BlockedReason.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.BlockedReason.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.BlockedReason.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.BlockedReason.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.BlockedReason.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.BlockedReason.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.BlockedReason.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.BlockedReason.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.BlockedReason.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.BlockedReason.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.BlockedReason.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.BlockedReason.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.BlockedReason.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.BlockedReason.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.BlockedReason.from_bytes": "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes",
+ "google.generativeai.types.BlockedReason.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.BlockedReason.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.types.BlockedReason.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.BlockedReason.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.BlockedReason.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.BrokenResponseError.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.BrokenResponseError.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.BrokenResponseError.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.BrokenResponseError.__init__": "google.generativeai.types.BlockedPromptException.__init__",
+ "google.generativeai.types.BrokenResponseError.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.BrokenResponseError.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.BrokenResponseError.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.BrokenResponseError.__new__": "google.generativeai.types.BlockedPromptException.__new__",
+ "google.generativeai.types.BrokenResponseError.add_note": "google.generativeai.types.BlockedPromptException.add_note",
+ "google.generativeai.types.BrokenResponseError.args": "google.generativeai.types.BlockedPromptException.args",
+ "google.generativeai.types.BrokenResponseError.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
+ "google.generativeai.types.CallableFunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.CallableFunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.CallableFunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.CallableFunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.CallableFunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.CallableFunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.CallableFunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.CallableFunctionDeclaration.description": "google.generativeai.types.FunctionDeclaration.description",
+ "google.generativeai.types.CallableFunctionDeclaration.from_function": "google.generativeai.types.FunctionDeclaration.from_function",
+ "google.generativeai.types.CallableFunctionDeclaration.name": "google.generativeai.types.FunctionDeclaration.name",
+ "google.generativeai.types.CallableFunctionDeclaration.parameters": "google.generativeai.types.FunctionDeclaration.parameters",
+ "google.generativeai.types.CallableFunctionDeclaration.to_proto": "google.generativeai.types.FunctionDeclaration.to_proto",
+ "google.generativeai.types.CitationMetadataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.CitationMetadataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.CitationMetadataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.CitationMetadataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.CitationMetadataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.CitationMetadataDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.CitationMetadataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.CitationMetadataDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.CitationMetadataDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.CitationMetadataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.CitationMetadataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.CitationMetadataDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.CitationMetadataDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.CitationMetadataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.CitationMetadataDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.CitationMetadataDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.CitationMetadataDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.CitationMetadataDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.CitationMetadataDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.CitationMetadataDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.CitationMetadataDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.CitationMetadataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.CitationMetadataDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.CitationMetadataDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.CitationSourceDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.CitationSourceDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.CitationSourceDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.CitationSourceDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.CitationSourceDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.CitationSourceDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.CitationSourceDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.CitationSourceDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.CitationSourceDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.CitationSourceDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.CitationSourceDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.CitationSourceDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.CitationSourceDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.CitationSourceDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.CitationSourceDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.CitationSourceDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.CitationSourceDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.CitationSourceDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.CitationSourceDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.CitationSourceDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.CitationSourceDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.CitationSourceDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.CitationSourceDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.CitationSourceDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.ContentDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ContentDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ContentDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ContentDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ContentDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ContentDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ContentDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ContentDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ContentDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ContentDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ContentDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ContentDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ContentDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ContentDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ContentDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ContentDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ContentDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ContentDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ContentDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ContentDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ContentDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ContentDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ContentDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ContentDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.ContentFilterDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ContentFilterDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ContentFilterDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ContentFilterDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ContentFilterDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ContentFilterDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ContentFilterDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ContentFilterDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ContentFilterDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ContentFilterDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ContentFilterDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ContentFilterDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ContentFilterDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ContentFilterDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ContentFilterDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ContentFilterDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ContentFilterDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ContentFilterDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ContentFilterDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ContentFilterDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ContentFilterDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ContentFilterDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ContentFilterDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ContentFilterDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.File.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.File.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.File.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.File.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.File.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.File.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.File.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.FileDataDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.FileDataDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.FileDataDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.FileDataDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.FileDataDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.FileDataDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.FileDataDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.FileDataDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.FileDataDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.FileDataDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.FileDataDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.FileDataDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.FileDataDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.FileDataDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.FileDataDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.FileDataDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.FileDataDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.FileDataDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.FileDataDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.FileDataDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.FileDataDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.FileDataDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.FileDataDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.FileDataDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.FunctionDeclaration.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.FunctionDeclaration.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.FunctionDeclaration.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.FunctionDeclaration.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.FunctionDeclaration.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.FunctionDeclaration.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.FunctionDeclaration.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.FunctionLibrary.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.FunctionLibrary.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.FunctionLibrary.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.FunctionLibrary.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.FunctionLibrary.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.FunctionLibrary.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.FunctionLibrary.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.GenerateContentResponse.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.GenerateContentResponse.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.GenerateContentResponse.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.GenerateContentResponse.__init__": "google.generativeai.types.AsyncGenerateContentResponse.__init__",
+ "google.generativeai.types.GenerateContentResponse.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.GenerateContentResponse.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.GenerateContentResponse.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.GenerateContentResponse.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.GenerateContentResponse.candidates": "google.generativeai.types.AsyncGenerateContentResponse.candidates",
+ "google.generativeai.types.GenerateContentResponse.parts": "google.generativeai.types.AsyncGenerateContentResponse.parts",
+ "google.generativeai.types.GenerateContentResponse.prompt_feedback": "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback",
+ "google.generativeai.types.GenerateContentResponse.text": "google.generativeai.types.AsyncGenerateContentResponse.text",
+ "google.generativeai.types.GenerateContentResponse.to_dict": "google.generativeai.types.AsyncGenerateContentResponse.to_dict",
+ "google.generativeai.types.GenerateContentResponse.usage_metadata": "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata",
+ "google.generativeai.types.GenerationConfig.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.GenerationConfig.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.GenerationConfig.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.GenerationConfig.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.GenerationConfig.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.GenerationConfig.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.GenerationConfigDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.GenerationConfigDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.GenerationConfigDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.GenerationConfigDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.GenerationConfigDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.GenerationConfigDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.GenerationConfigDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.GenerationConfigDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.GenerationConfigDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.GenerationConfigDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.GenerationConfigDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.GenerationConfigDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.GenerationConfigDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.GenerationConfigDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.GenerationConfigDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.GenerationConfigDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.GenerationConfigDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.GenerationConfigDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.GenerationConfigDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.GenerationConfigDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.GenerationConfigDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.GenerationConfigDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.GenerationConfigDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.GenerationConfigDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.HarmBlockThreshold.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmBlockThreshold.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmBlockThreshold.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmBlockThreshold.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmBlockThreshold.__contains__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__",
+ "google.generativeai.types.HarmBlockThreshold.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmBlockThreshold.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmBlockThreshold.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__",
+ "google.generativeai.types.HarmBlockThreshold.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmBlockThreshold.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmBlockThreshold.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmBlockThreshold.__iter__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__",
+ "google.generativeai.types.HarmBlockThreshold.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmBlockThreshold.__len__": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__",
+ "google.generativeai.types.HarmBlockThreshold.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmBlockThreshold.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmBlockThreshold.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmBlockThreshold.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmBlockThreshold.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmBlockThreshold.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmBlockThreshold.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmBlockThreshold.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmBlockThreshold.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmBlockThreshold.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmBlockThreshold.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmBlockThreshold.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmBlockThreshold.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmBlockThreshold.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmBlockThreshold.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmBlockThreshold.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmBlockThreshold.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmBlockThreshold.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmBlockThreshold.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmBlockThreshold.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmBlockThreshold.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmBlockThreshold.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmBlockThreshold.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmBlockThreshold.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmBlockThreshold.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmBlockThreshold.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmBlockThreshold.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmBlockThreshold.from_bytes": "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes",
+ "google.generativeai.types.HarmBlockThreshold.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmBlockThreshold.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.types.HarmBlockThreshold.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmBlockThreshold.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmBlockThreshold.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.HarmCategory.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmCategory.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmCategory.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmCategory.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmCategory.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmCategory.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmCategory.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmCategory.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmCategory.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmCategory.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmCategory.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmCategory.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmCategory.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmCategory.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmCategory.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmCategory.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmCategory.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmCategory.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmCategory.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmCategory.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmCategory.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmCategory.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmCategory.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmCategory.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmCategory.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmCategory.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmCategory.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmCategory.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmCategory.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmCategory.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmCategory.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmCategory.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmCategory.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmCategory.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmCategory.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmCategory.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmCategory.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmCategory.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmCategory.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmCategory.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmCategory.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmCategory.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmCategory.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmCategory.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.types.HarmCategory.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmCategory.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmCategory.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.HarmProbability.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.HarmProbability.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.HarmProbability.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.HarmProbability.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.HarmProbability.__contains__": "google.generativeai.protos.SafetyRating.HarmProbability.__contains__",
+ "google.generativeai.types.HarmProbability.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.HarmProbability.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.HarmProbability.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.HarmProbability.__getitem__": "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__",
+ "google.generativeai.types.HarmProbability.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.HarmProbability.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.HarmProbability.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.HarmProbability.__iter__": "google.generativeai.protos.SafetyRating.HarmProbability.__iter__",
+ "google.generativeai.types.HarmProbability.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.HarmProbability.__len__": "google.generativeai.protos.SafetyRating.HarmProbability.__len__",
+ "google.generativeai.types.HarmProbability.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.HarmProbability.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.HarmProbability.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.HarmProbability.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.HarmProbability.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.HarmProbability.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.HarmProbability.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.HarmProbability.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.HarmProbability.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.HarmProbability.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.HarmProbability.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.HarmProbability.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.HarmProbability.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.HarmProbability.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.HarmProbability.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.HarmProbability.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.HarmProbability.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.HarmProbability.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.HarmProbability.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.HarmProbability.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.HarmProbability.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.HarmProbability.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.HarmProbability.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.HarmProbability.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.HarmProbability.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.HarmProbability.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.HarmProbability.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.HarmProbability.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.HarmProbability.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.HarmProbability.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.HarmProbability.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.HarmProbability.from_bytes": "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes",
+ "google.generativeai.types.HarmProbability.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.HarmProbability.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.types.HarmProbability.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.HarmProbability.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.HarmProbability.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.IncompleteIterationError.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.IncompleteIterationError.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.IncompleteIterationError.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.IncompleteIterationError.__init__": "google.generativeai.types.BlockedPromptException.__init__",
+ "google.generativeai.types.IncompleteIterationError.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.IncompleteIterationError.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.IncompleteIterationError.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.IncompleteIterationError.__new__": "google.generativeai.types.BlockedPromptException.__new__",
+ "google.generativeai.types.IncompleteIterationError.add_note": "google.generativeai.types.BlockedPromptException.add_note",
+ "google.generativeai.types.IncompleteIterationError.args": "google.generativeai.types.BlockedPromptException.args",
+ "google.generativeai.types.IncompleteIterationError.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
+ "google.generativeai.types.Model.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.Model.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.Model.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.Model.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.Model.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.Model.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.ModelNameOptions": "google.generativeai.types.AnyModelNameOptions",
+ "google.generativeai.types.PartDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.PartDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.PartDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.PartDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.PartDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.PartDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.PartDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.PartDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.PartDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.PartDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.PartDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.PartDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.PartDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.PartDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.PartDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.PartDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.PartDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.PartDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.PartDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.PartDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.PartDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.PartDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.PartDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.PartDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.Permission.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.Permission.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.Permission.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.Permission.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.Permission.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.Permission.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.Permissions.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.Permissions.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.Permissions.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.Permissions.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.Permissions.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.Permissions.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.Permissions.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.RequestOptions.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.RequestOptions.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.RequestOptions.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.RequestOptions.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.RequestOptions.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.RequestOptions.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.SafetyFeedbackDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetyFeedbackDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetyFeedbackDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetyFeedbackDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetyFeedbackDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetyFeedbackDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetyFeedbackDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetyFeedbackDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetyFeedbackDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetyFeedbackDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetyFeedbackDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetyFeedbackDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetyFeedbackDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetyFeedbackDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetyFeedbackDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetyFeedbackDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetyFeedbackDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetyFeedbackDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetyFeedbackDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetyFeedbackDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetyFeedbackDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetyFeedbackDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetyFeedbackDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetyFeedbackDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.SafetyRatingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetyRatingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetyRatingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetyRatingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetyRatingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetyRatingDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetyRatingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetyRatingDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetyRatingDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetyRatingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetyRatingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetyRatingDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetyRatingDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetyRatingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetyRatingDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetyRatingDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetyRatingDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetyRatingDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetyRatingDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetyRatingDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetyRatingDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetyRatingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetyRatingDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetyRatingDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.SafetySettingDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.SafetySettingDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.SafetySettingDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.SafetySettingDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.SafetySettingDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.SafetySettingDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.SafetySettingDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.SafetySettingDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.SafetySettingDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.SafetySettingDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.SafetySettingDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.SafetySettingDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.SafetySettingDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.SafetySettingDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.SafetySettingDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.SafetySettingDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.SafetySettingDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.SafetySettingDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.SafetySettingDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.SafetySettingDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.SafetySettingDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.SafetySettingDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.SafetySettingDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.SafetySettingDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.StopCandidateException.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.StopCandidateException.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.StopCandidateException.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.StopCandidateException.__init__": "google.generativeai.types.BlockedPromptException.__init__",
+ "google.generativeai.types.StopCandidateException.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.StopCandidateException.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.StopCandidateException.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.StopCandidateException.__new__": "google.generativeai.types.BlockedPromptException.__new__",
+ "google.generativeai.types.StopCandidateException.add_note": "google.generativeai.types.BlockedPromptException.add_note",
+ "google.generativeai.types.StopCandidateException.args": "google.generativeai.types.BlockedPromptException.args",
+ "google.generativeai.types.StopCandidateException.with_traceback": "google.generativeai.types.BlockedPromptException.with_traceback",
+ "google.generativeai.types.Tool.__eq__": "google.generativeai.caching.CachedContent.__eq__",
+ "google.generativeai.types.Tool.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.Tool.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.Tool.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.Tool.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.Tool.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.Tool.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.ToolDict.__contains__": "google.generativeai.types.BlobDict.__contains__",
+ "google.generativeai.types.ToolDict.__eq__": "google.generativeai.types.BlobDict.__eq__",
+ "google.generativeai.types.ToolDict.__ge__": "google.generativeai.types.BlobDict.__ge__",
+ "google.generativeai.types.ToolDict.__getitem__": "google.generativeai.types.BlobDict.__getitem__",
+ "google.generativeai.types.ToolDict.__gt__": "google.generativeai.types.BlobDict.__gt__",
+ "google.generativeai.types.ToolDict.__init__": "google.generativeai.types.BlobDict.__init__",
+ "google.generativeai.types.ToolDict.__iter__": "google.generativeai.types.BlobDict.__iter__",
+ "google.generativeai.types.ToolDict.__le__": "google.generativeai.types.BlobDict.__le__",
+ "google.generativeai.types.ToolDict.__len__": "google.generativeai.types.BlobDict.__len__",
+ "google.generativeai.types.ToolDict.__lt__": "google.generativeai.types.BlobDict.__lt__",
+ "google.generativeai.types.ToolDict.__ne__": "google.generativeai.types.BlobDict.__ne__",
+ "google.generativeai.types.ToolDict.__new__": "google.generativeai.types.BlobDict.__new__",
+ "google.generativeai.types.ToolDict.__or__": "google.generativeai.types.BlobDict.__or__",
+ "google.generativeai.types.ToolDict.__ror__": "google.generativeai.types.BlobDict.__ror__",
+ "google.generativeai.types.ToolDict.clear": "google.generativeai.types.BlobDict.clear",
+ "google.generativeai.types.ToolDict.copy": "google.generativeai.types.BlobDict.copy",
+ "google.generativeai.types.ToolDict.get": "google.generativeai.types.BlobDict.get",
+ "google.generativeai.types.ToolDict.items": "google.generativeai.types.BlobDict.items",
+ "google.generativeai.types.ToolDict.keys": "google.generativeai.types.BlobDict.keys",
+ "google.generativeai.types.ToolDict.pop": "google.generativeai.types.BlobDict.pop",
+ "google.generativeai.types.ToolDict.popitem": "google.generativeai.types.BlobDict.popitem",
+ "google.generativeai.types.ToolDict.setdefault": "google.generativeai.types.BlobDict.setdefault",
+ "google.generativeai.types.ToolDict.update": "google.generativeai.types.BlobDict.update",
+ "google.generativeai.types.ToolDict.values": "google.generativeai.types.BlobDict.values",
+ "google.generativeai.types.TunedModel.__ge__": "google.generativeai.caching.CachedContent.__ge__",
+ "google.generativeai.types.TunedModel.__gt__": "google.generativeai.caching.CachedContent.__gt__",
+ "google.generativeai.types.TunedModel.__le__": "google.generativeai.caching.CachedContent.__le__",
+ "google.generativeai.types.TunedModel.__lt__": "google.generativeai.caching.CachedContent.__lt__",
+ "google.generativeai.types.TunedModel.__ne__": "google.generativeai.caching.CachedContent.__ne__",
+ "google.generativeai.types.TunedModel.__new__": "google.generativeai.caching.CachedContent.__new__",
+ "google.generativeai.types.TunedModelState.__abs__": "google.generativeai.protos.Candidate.FinishReason.__abs__",
+ "google.generativeai.types.TunedModelState.__add__": "google.generativeai.protos.Candidate.FinishReason.__add__",
+ "google.generativeai.types.TunedModelState.__and__": "google.generativeai.protos.Candidate.FinishReason.__and__",
+ "google.generativeai.types.TunedModelState.__bool__": "google.generativeai.protos.Candidate.FinishReason.__bool__",
+ "google.generativeai.types.TunedModelState.__contains__": "google.generativeai.protos.TunedModel.State.__contains__",
+ "google.generativeai.types.TunedModelState.__eq__": "google.generativeai.protos.Candidate.FinishReason.__eq__",
+ "google.generativeai.types.TunedModelState.__floordiv__": "google.generativeai.protos.Candidate.FinishReason.__floordiv__",
+ "google.generativeai.types.TunedModelState.__ge__": "google.generativeai.protos.Candidate.FinishReason.__ge__",
+ "google.generativeai.types.TunedModelState.__getitem__": "google.generativeai.protos.TunedModel.State.__getitem__",
+ "google.generativeai.types.TunedModelState.__gt__": "google.generativeai.protos.Candidate.FinishReason.__gt__",
+ "google.generativeai.types.TunedModelState.__init__": "google.generativeai.protos.Candidate.FinishReason.__init__",
+ "google.generativeai.types.TunedModelState.__invert__": "google.generativeai.protos.Candidate.FinishReason.__invert__",
+ "google.generativeai.types.TunedModelState.__iter__": "google.generativeai.protos.TunedModel.State.__iter__",
+ "google.generativeai.types.TunedModelState.__le__": "google.generativeai.protos.Candidate.FinishReason.__le__",
+ "google.generativeai.types.TunedModelState.__len__": "google.generativeai.protos.TunedModel.State.__len__",
+ "google.generativeai.types.TunedModelState.__lshift__": "google.generativeai.protos.Candidate.FinishReason.__lshift__",
+ "google.generativeai.types.TunedModelState.__lt__": "google.generativeai.protos.Candidate.FinishReason.__lt__",
+ "google.generativeai.types.TunedModelState.__mod__": "google.generativeai.protos.Candidate.FinishReason.__mod__",
+ "google.generativeai.types.TunedModelState.__mul__": "google.generativeai.protos.Candidate.FinishReason.__mul__",
+ "google.generativeai.types.TunedModelState.__ne__": "google.generativeai.protos.Candidate.FinishReason.__ne__",
+ "google.generativeai.types.TunedModelState.__neg__": "google.generativeai.protos.Candidate.FinishReason.__neg__",
+ "google.generativeai.types.TunedModelState.__new__": "google.generativeai.protos.Candidate.FinishReason.__new__",
+ "google.generativeai.types.TunedModelState.__or__": "google.generativeai.protos.Candidate.FinishReason.__or__",
+ "google.generativeai.types.TunedModelState.__pos__": "google.generativeai.protos.Candidate.FinishReason.__pos__",
+ "google.generativeai.types.TunedModelState.__pow__": "google.generativeai.protos.Candidate.FinishReason.__pow__",
+ "google.generativeai.types.TunedModelState.__radd__": "google.generativeai.protos.Candidate.FinishReason.__radd__",
+ "google.generativeai.types.TunedModelState.__rand__": "google.generativeai.protos.Candidate.FinishReason.__rand__",
+ "google.generativeai.types.TunedModelState.__rfloordiv__": "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__",
+ "google.generativeai.types.TunedModelState.__rlshift__": "google.generativeai.protos.Candidate.FinishReason.__rlshift__",
+ "google.generativeai.types.TunedModelState.__rmod__": "google.generativeai.protos.Candidate.FinishReason.__rmod__",
+ "google.generativeai.types.TunedModelState.__rmul__": "google.generativeai.protos.Candidate.FinishReason.__rmul__",
+ "google.generativeai.types.TunedModelState.__ror__": "google.generativeai.protos.Candidate.FinishReason.__ror__",
+ "google.generativeai.types.TunedModelState.__rpow__": "google.generativeai.protos.Candidate.FinishReason.__rpow__",
+ "google.generativeai.types.TunedModelState.__rrshift__": "google.generativeai.protos.Candidate.FinishReason.__rrshift__",
+ "google.generativeai.types.TunedModelState.__rshift__": "google.generativeai.protos.Candidate.FinishReason.__rshift__",
+ "google.generativeai.types.TunedModelState.__rsub__": "google.generativeai.protos.Candidate.FinishReason.__rsub__",
+ "google.generativeai.types.TunedModelState.__rtruediv__": "google.generativeai.protos.Candidate.FinishReason.__rtruediv__",
+ "google.generativeai.types.TunedModelState.__rxor__": "google.generativeai.protos.Candidate.FinishReason.__rxor__",
+ "google.generativeai.types.TunedModelState.__sub__": "google.generativeai.protos.Candidate.FinishReason.__sub__",
+ "google.generativeai.types.TunedModelState.__truediv__": "google.generativeai.protos.Candidate.FinishReason.__truediv__",
+ "google.generativeai.types.TunedModelState.__xor__": "google.generativeai.protos.Candidate.FinishReason.__xor__",
+ "google.generativeai.types.TunedModelState.as_integer_ratio": "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio",
+ "google.generativeai.types.TunedModelState.bit_count": "google.generativeai.protos.Candidate.FinishReason.bit_count",
+ "google.generativeai.types.TunedModelState.bit_length": "google.generativeai.protos.Candidate.FinishReason.bit_length",
+ "google.generativeai.types.TunedModelState.conjugate": "google.generativeai.protos.Candidate.FinishReason.conjugate",
+ "google.generativeai.types.TunedModelState.denominator": "google.generativeai.protos.Candidate.FinishReason.denominator",
+ "google.generativeai.types.TunedModelState.from_bytes": "google.generativeai.protos.TunedModel.State.from_bytes",
+ "google.generativeai.types.TunedModelState.imag": "google.generativeai.protos.Candidate.FinishReason.imag",
+ "google.generativeai.types.TunedModelState.is_integer": "google.generativeai.protos.Candidate.FinishReason.is_integer",
+ "google.generativeai.types.TunedModelState.numerator": "google.generativeai.protos.Candidate.FinishReason.numerator",
+ "google.generativeai.types.TunedModelState.real": "google.generativeai.protos.Candidate.FinishReason.real",
+ "google.generativeai.types.TunedModelState.to_bytes": "google.generativeai.protos.Candidate.FinishReason.to_bytes",
+ "google.generativeai.types.annotations": "google.generativeai.caching.annotations"
+ },
+ "is_fragment": {
+ "google.generativeai": false,
+ "google.generativeai.ChatSession": false,
+ "google.generativeai.ChatSession.__eq__": true,
+ "google.generativeai.ChatSession.__ge__": true,
+ "google.generativeai.ChatSession.__gt__": true,
+ "google.generativeai.ChatSession.__init__": true,
+ "google.generativeai.ChatSession.__le__": true,
+ "google.generativeai.ChatSession.__lt__": true,
+ "google.generativeai.ChatSession.__ne__": true,
+ "google.generativeai.ChatSession.__new__": true,
+ "google.generativeai.ChatSession.history": true,
+ "google.generativeai.ChatSession.last": true,
+ "google.generativeai.ChatSession.rewind": true,
+ "google.generativeai.ChatSession.send_message": true,
+ "google.generativeai.ChatSession.send_message_async": true,
+ "google.generativeai.GenerationConfig": false,
+ "google.generativeai.GenerationConfig.__eq__": true,
+ "google.generativeai.GenerationConfig.__ge__": true,
+ "google.generativeai.GenerationConfig.__gt__": true,
+ "google.generativeai.GenerationConfig.__init__": true,
+ "google.generativeai.GenerationConfig.__le__": true,
+ "google.generativeai.GenerationConfig.__lt__": true,
+ "google.generativeai.GenerationConfig.__ne__": true,
+ "google.generativeai.GenerationConfig.__new__": true,
+ "google.generativeai.GenerationConfig.candidate_count": true,
+ "google.generativeai.GenerationConfig.frequency_penalty": true,
+ "google.generativeai.GenerationConfig.logprobs": true,
+ "google.generativeai.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.GenerationConfig.presence_penalty": true,
+ "google.generativeai.GenerationConfig.response_logprobs": true,
+ "google.generativeai.GenerationConfig.response_mime_type": true,
+ "google.generativeai.GenerationConfig.response_schema": true,
+ "google.generativeai.GenerationConfig.seed": true,
+ "google.generativeai.GenerationConfig.stop_sequences": true,
+ "google.generativeai.GenerationConfig.temperature": true,
+ "google.generativeai.GenerationConfig.top_k": true,
+ "google.generativeai.GenerationConfig.top_p": true,
+ "google.generativeai.GenerativeModel": false,
+ "google.generativeai.GenerativeModel.__eq__": true,
+ "google.generativeai.GenerativeModel.__ge__": true,
+ "google.generativeai.GenerativeModel.__gt__": true,
+ "google.generativeai.GenerativeModel.__init__": true,
+ "google.generativeai.GenerativeModel.__le__": true,
+ "google.generativeai.GenerativeModel.__lt__": true,
+ "google.generativeai.GenerativeModel.__ne__": true,
+ "google.generativeai.GenerativeModel.__new__": true,
+ "google.generativeai.GenerativeModel.cached_content": true,
+ "google.generativeai.GenerativeModel.count_tokens": true,
+ "google.generativeai.GenerativeModel.count_tokens_async": true,
+ "google.generativeai.GenerativeModel.from_cached_content": true,
+ "google.generativeai.GenerativeModel.generate_content": true,
+ "google.generativeai.GenerativeModel.generate_content_async": true,
+ "google.generativeai.GenerativeModel.model_name": true,
+ "google.generativeai.GenerativeModel.start_chat": true,
+ "google.generativeai.__version__": true,
+ "google.generativeai.annotations": true,
+ "google.generativeai.caching": false,
+ "google.generativeai.caching.CachedContent": false,
+ "google.generativeai.caching.CachedContent.__eq__": true,
+ "google.generativeai.caching.CachedContent.__ge__": true,
+ "google.generativeai.caching.CachedContent.__gt__": true,
+ "google.generativeai.caching.CachedContent.__init__": true,
+ "google.generativeai.caching.CachedContent.__le__": true,
+ "google.generativeai.caching.CachedContent.__lt__": true,
+ "google.generativeai.caching.CachedContent.__ne__": true,
+ "google.generativeai.caching.CachedContent.__new__": true,
+ "google.generativeai.caching.CachedContent.create": true,
+ "google.generativeai.caching.CachedContent.create_time": true,
+ "google.generativeai.caching.CachedContent.delete": true,
+ "google.generativeai.caching.CachedContent.display_name": true,
+ "google.generativeai.caching.CachedContent.expire_time": true,
+ "google.generativeai.caching.CachedContent.get": true,
+ "google.generativeai.caching.CachedContent.list": true,
+ "google.generativeai.caching.CachedContent.model": true,
+ "google.generativeai.caching.CachedContent.name": true,
+ "google.generativeai.caching.CachedContent.update": true,
+ "google.generativeai.caching.CachedContent.update_time": true,
+ "google.generativeai.caching.CachedContent.usage_metadata": true,
+ "google.generativeai.caching.annotations": true,
+ "google.generativeai.caching.get_default_cache_client": false,
+ "google.generativeai.configure": false,
+ "google.generativeai.create_tuned_model": false,
+ "google.generativeai.delete_file": false,
+ "google.generativeai.delete_tuned_model": false,
+ "google.generativeai.embed_content": false,
+ "google.generativeai.embed_content_async": false,
+ "google.generativeai.get_base_model": false,
+ "google.generativeai.get_file": false,
+ "google.generativeai.get_model": false,
+ "google.generativeai.get_operation": false,
+ "google.generativeai.get_tuned_model": false,
+ "google.generativeai.list_files": false,
+ "google.generativeai.list_models": false,
+ "google.generativeai.list_operations": false,
+ "google.generativeai.list_tuned_models": false,
+ "google.generativeai.protos": false,
+ "google.generativeai.protos.AttributionSourceId": false,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId": false,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.mro": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.part_index": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.passage_id": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": false,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.chunk": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.mro": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.source": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": true,
+ "google.generativeai.protos.AttributionSourceId.__call__": true,
+ "google.generativeai.protos.AttributionSourceId.__eq__": true,
+ "google.generativeai.protos.AttributionSourceId.__ge__": true,
+ "google.generativeai.protos.AttributionSourceId.__gt__": true,
+ "google.generativeai.protos.AttributionSourceId.__init__": true,
+ "google.generativeai.protos.AttributionSourceId.__le__": true,
+ "google.generativeai.protos.AttributionSourceId.__lt__": true,
+ "google.generativeai.protos.AttributionSourceId.__ne__": true,
+ "google.generativeai.protos.AttributionSourceId.__new__": true,
+ "google.generativeai.protos.AttributionSourceId.__or__": true,
+ "google.generativeai.protos.AttributionSourceId.__ror__": true,
+ "google.generativeai.protos.AttributionSourceId.copy_from": true,
+ "google.generativeai.protos.AttributionSourceId.deserialize": true,
+ "google.generativeai.protos.AttributionSourceId.from_json": true,
+ "google.generativeai.protos.AttributionSourceId.grounding_passage": true,
+ "google.generativeai.protos.AttributionSourceId.mro": true,
+ "google.generativeai.protos.AttributionSourceId.pb": true,
+ "google.generativeai.protos.AttributionSourceId.semantic_retriever_chunk": true,
+ "google.generativeai.protos.AttributionSourceId.serialize": true,
+ "google.generativeai.protos.AttributionSourceId.to_dict": true,
+ "google.generativeai.protos.AttributionSourceId.to_json": true,
+ "google.generativeai.protos.AttributionSourceId.wrap": true,
+ "google.generativeai.protos.BatchCreateChunksRequest": false,
+ "google.generativeai.protos.BatchCreateChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.mro": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.parent": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.pb": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.requests": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchCreateChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchCreateChunksResponse": false,
+ "google.generativeai.protos.BatchCreateChunksResponse.__call__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__eq__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ge__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__gt__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__init__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__le__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__lt__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ne__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__new__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__or__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.__ror__": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.chunks": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.copy_from": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.deserialize": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.from_json": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.mro": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.pb": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.serialize": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.to_dict": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.to_json": true,
+ "google.generativeai.protos.BatchCreateChunksResponse.wrap": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest": false,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.mro": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.parent": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.pb": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.requests": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchDeleteChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest": false,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__call__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__eq__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ge__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__gt__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__init__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__le__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__lt__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ne__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__new__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__or__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.__ror__": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.from_json": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.model": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.mro": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.pb": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.requests": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.serialize": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_json": true,
+ "google.generativeai.protos.BatchEmbedContentsRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse": false,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__call__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__eq__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ge__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__gt__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__init__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__le__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__lt__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ne__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__new__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__or__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.__ror__": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.embeddings": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.from_json": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.mro": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.pb": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.serialize": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_json": true,
+ "google.generativeai.protos.BatchEmbedContentsResponse.wrap": true,
+ "google.generativeai.protos.BatchEmbedTextRequest": false,
+ "google.generativeai.protos.BatchEmbedTextRequest.__call__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__eq__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ge__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__gt__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__init__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__le__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__lt__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ne__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__new__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__or__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.__ror__": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.copy_from": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.deserialize": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.from_json": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.model": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.mro": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.pb": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.requests": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.serialize": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.texts": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.to_dict": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.to_json": true,
+ "google.generativeai.protos.BatchEmbedTextRequest.wrap": true,
+ "google.generativeai.protos.BatchEmbedTextResponse": false,
+ "google.generativeai.protos.BatchEmbedTextResponse.__call__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__eq__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ge__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__gt__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__init__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__le__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__lt__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ne__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__new__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__or__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.__ror__": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.copy_from": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.deserialize": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.embeddings": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.from_json": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.mro": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.pb": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.serialize": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.to_dict": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.to_json": true,
+ "google.generativeai.protos.BatchEmbedTextResponse.wrap": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest": false,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__call__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__eq__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ge__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__gt__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__init__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__le__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__lt__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ne__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__new__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__or__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.__ror__": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.from_json": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.mro": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.parent": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.pb": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.requests": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.serialize": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_json": true,
+ "google.generativeai.protos.BatchUpdateChunksRequest.wrap": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse": false,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__call__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__eq__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ge__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__gt__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__init__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__le__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__lt__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ne__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__new__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__or__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.__ror__": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.chunks": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.from_json": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.mro": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.pb": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.serialize": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_json": true,
+ "google.generativeai.protos.BatchUpdateChunksResponse.wrap": true,
+ "google.generativeai.protos.Blob": false,
+ "google.generativeai.protos.Blob.__call__": true,
+ "google.generativeai.protos.Blob.__eq__": true,
+ "google.generativeai.protos.Blob.__ge__": true,
+ "google.generativeai.protos.Blob.__gt__": true,
+ "google.generativeai.protos.Blob.__init__": true,
+ "google.generativeai.protos.Blob.__le__": true,
+ "google.generativeai.protos.Blob.__lt__": true,
+ "google.generativeai.protos.Blob.__ne__": true,
+ "google.generativeai.protos.Blob.__new__": true,
+ "google.generativeai.protos.Blob.__or__": true,
+ "google.generativeai.protos.Blob.__ror__": true,
+ "google.generativeai.protos.Blob.copy_from": true,
+ "google.generativeai.protos.Blob.data": true,
+ "google.generativeai.protos.Blob.deserialize": true,
+ "google.generativeai.protos.Blob.from_json": true,
+ "google.generativeai.protos.Blob.mime_type": true,
+ "google.generativeai.protos.Blob.mro": true,
+ "google.generativeai.protos.Blob.pb": true,
+ "google.generativeai.protos.Blob.serialize": true,
+ "google.generativeai.protos.Blob.to_dict": true,
+ "google.generativeai.protos.Blob.to_json": true,
+ "google.generativeai.protos.Blob.wrap": true,
+ "google.generativeai.protos.CachedContent": false,
+ "google.generativeai.protos.CachedContent.UsageMetadata": false,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__call__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__eq__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ge__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__gt__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__init__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__le__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__lt__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ne__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__new__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__or__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.__ror__": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.from_json": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.mro": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.pb": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.serialize": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_json": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.total_token_count": true,
+ "google.generativeai.protos.CachedContent.UsageMetadata.wrap": true,
+ "google.generativeai.protos.CachedContent.__call__": true,
+ "google.generativeai.protos.CachedContent.__eq__": true,
+ "google.generativeai.protos.CachedContent.__ge__": true,
+ "google.generativeai.protos.CachedContent.__gt__": true,
+ "google.generativeai.protos.CachedContent.__init__": true,
+ "google.generativeai.protos.CachedContent.__le__": true,
+ "google.generativeai.protos.CachedContent.__lt__": true,
+ "google.generativeai.protos.CachedContent.__ne__": true,
+ "google.generativeai.protos.CachedContent.__new__": true,
+ "google.generativeai.protos.CachedContent.__or__": true,
+ "google.generativeai.protos.CachedContent.__ror__": true,
+ "google.generativeai.protos.CachedContent.contents": true,
+ "google.generativeai.protos.CachedContent.copy_from": true,
+ "google.generativeai.protos.CachedContent.create_time": true,
+ "google.generativeai.protos.CachedContent.deserialize": true,
+ "google.generativeai.protos.CachedContent.display_name": true,
+ "google.generativeai.protos.CachedContent.expire_time": true,
+ "google.generativeai.protos.CachedContent.from_json": true,
+ "google.generativeai.protos.CachedContent.model": true,
+ "google.generativeai.protos.CachedContent.mro": true,
+ "google.generativeai.protos.CachedContent.name": true,
+ "google.generativeai.protos.CachedContent.pb": true,
+ "google.generativeai.protos.CachedContent.serialize": true,
+ "google.generativeai.protos.CachedContent.system_instruction": true,
+ "google.generativeai.protos.CachedContent.to_dict": true,
+ "google.generativeai.protos.CachedContent.to_json": true,
+ "google.generativeai.protos.CachedContent.tool_config": true,
+ "google.generativeai.protos.CachedContent.tools": true,
+ "google.generativeai.protos.CachedContent.ttl": true,
+ "google.generativeai.protos.CachedContent.update_time": true,
+ "google.generativeai.protos.CachedContent.usage_metadata": true,
+ "google.generativeai.protos.CachedContent.wrap": true,
+ "google.generativeai.protos.Candidate": false,
+ "google.generativeai.protos.Candidate.FinishReason": false,
+ "google.generativeai.protos.Candidate.FinishReason.BLOCKLIST": true,
+ "google.generativeai.protos.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.Candidate.FinishReason.LANGUAGE": true,
+ "google.generativeai.protos.Candidate.FinishReason.MALFORMED_FUNCTION_CALL": true,
+ "google.generativeai.protos.Candidate.FinishReason.MAX_TOKENS": true,
+ "google.generativeai.protos.Candidate.FinishReason.OTHER": true,
+ "google.generativeai.protos.Candidate.FinishReason.PROHIBITED_CONTENT": true,
+ "google.generativeai.protos.Candidate.FinishReason.RECITATION": true,
+ "google.generativeai.protos.Candidate.FinishReason.SAFETY": true,
+ "google.generativeai.protos.Candidate.FinishReason.SPII": true,
+ "google.generativeai.protos.Candidate.FinishReason.STOP": true,
+ "google.generativeai.protos.Candidate.FinishReason.__abs__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__add__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__and__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__bool__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__contains__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__eq__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__floordiv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ge__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__getitem__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__gt__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__init__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__invert__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__iter__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__le__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__len__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__lshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__lt__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__mod__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__mul__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ne__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__neg__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__new__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__or__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__pos__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__pow__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__radd__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rand__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rfloordiv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rlshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rmod__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rmul__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__ror__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rpow__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rrshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rshift__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rsub__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rtruediv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__rxor__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__sub__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__truediv__": true,
+ "google.generativeai.protos.Candidate.FinishReason.__xor__": true,
+ "google.generativeai.protos.Candidate.FinishReason.as_integer_ratio": true,
+ "google.generativeai.protos.Candidate.FinishReason.bit_count": true,
+ "google.generativeai.protos.Candidate.FinishReason.bit_length": true,
+ "google.generativeai.protos.Candidate.FinishReason.conjugate": true,
+ "google.generativeai.protos.Candidate.FinishReason.denominator": true,
+ "google.generativeai.protos.Candidate.FinishReason.from_bytes": true,
+ "google.generativeai.protos.Candidate.FinishReason.imag": true,
+ "google.generativeai.protos.Candidate.FinishReason.is_integer": true,
+ "google.generativeai.protos.Candidate.FinishReason.numerator": true,
+ "google.generativeai.protos.Candidate.FinishReason.real": true,
+ "google.generativeai.protos.Candidate.FinishReason.to_bytes": true,
+ "google.generativeai.protos.Candidate.__call__": true,
+ "google.generativeai.protos.Candidate.__eq__": true,
+ "google.generativeai.protos.Candidate.__ge__": true,
+ "google.generativeai.protos.Candidate.__gt__": true,
+ "google.generativeai.protos.Candidate.__init__": true,
+ "google.generativeai.protos.Candidate.__le__": true,
+ "google.generativeai.protos.Candidate.__lt__": true,
+ "google.generativeai.protos.Candidate.__ne__": true,
+ "google.generativeai.protos.Candidate.__new__": true,
+ "google.generativeai.protos.Candidate.__or__": true,
+ "google.generativeai.protos.Candidate.__ror__": true,
+ "google.generativeai.protos.Candidate.avg_logprobs": true,
+ "google.generativeai.protos.Candidate.citation_metadata": true,
+ "google.generativeai.protos.Candidate.content": true,
+ "google.generativeai.protos.Candidate.copy_from": true,
+ "google.generativeai.protos.Candidate.deserialize": true,
+ "google.generativeai.protos.Candidate.finish_reason": true,
+ "google.generativeai.protos.Candidate.from_json": true,
+ "google.generativeai.protos.Candidate.grounding_attributions": true,
+ "google.generativeai.protos.Candidate.grounding_metadata": true,
+ "google.generativeai.protos.Candidate.index": true,
+ "google.generativeai.protos.Candidate.logprobs_result": true,
+ "google.generativeai.protos.Candidate.mro": true,
+ "google.generativeai.protos.Candidate.pb": true,
+ "google.generativeai.protos.Candidate.safety_ratings": true,
+ "google.generativeai.protos.Candidate.serialize": true,
+ "google.generativeai.protos.Candidate.to_dict": true,
+ "google.generativeai.protos.Candidate.to_json": true,
+ "google.generativeai.protos.Candidate.token_count": true,
+ "google.generativeai.protos.Candidate.wrap": true,
+ "google.generativeai.protos.Chunk": false,
+ "google.generativeai.protos.Chunk.State": false,
+ "google.generativeai.protos.Chunk.State.STATE_ACTIVE": true,
+ "google.generativeai.protos.Chunk.State.STATE_FAILED": true,
+ "google.generativeai.protos.Chunk.State.STATE_PENDING_PROCESSING": true,
+ "google.generativeai.protos.Chunk.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.Chunk.State.__abs__": true,
+ "google.generativeai.protos.Chunk.State.__add__": true,
+ "google.generativeai.protos.Chunk.State.__and__": true,
+ "google.generativeai.protos.Chunk.State.__bool__": true,
+ "google.generativeai.protos.Chunk.State.__contains__": true,
+ "google.generativeai.protos.Chunk.State.__eq__": true,
+ "google.generativeai.protos.Chunk.State.__floordiv__": true,
+ "google.generativeai.protos.Chunk.State.__ge__": true,
+ "google.generativeai.protos.Chunk.State.__getitem__": true,
+ "google.generativeai.protos.Chunk.State.__gt__": true,
+ "google.generativeai.protos.Chunk.State.__init__": true,
+ "google.generativeai.protos.Chunk.State.__invert__": true,
+ "google.generativeai.protos.Chunk.State.__iter__": true,
+ "google.generativeai.protos.Chunk.State.__le__": true,
+ "google.generativeai.protos.Chunk.State.__len__": true,
+ "google.generativeai.protos.Chunk.State.__lshift__": true,
+ "google.generativeai.protos.Chunk.State.__lt__": true,
+ "google.generativeai.protos.Chunk.State.__mod__": true,
+ "google.generativeai.protos.Chunk.State.__mul__": true,
+ "google.generativeai.protos.Chunk.State.__ne__": true,
+ "google.generativeai.protos.Chunk.State.__neg__": true,
+ "google.generativeai.protos.Chunk.State.__new__": true,
+ "google.generativeai.protos.Chunk.State.__or__": true,
+ "google.generativeai.protos.Chunk.State.__pos__": true,
+ "google.generativeai.protos.Chunk.State.__pow__": true,
+ "google.generativeai.protos.Chunk.State.__radd__": true,
+ "google.generativeai.protos.Chunk.State.__rand__": true,
+ "google.generativeai.protos.Chunk.State.__rfloordiv__": true,
+ "google.generativeai.protos.Chunk.State.__rlshift__": true,
+ "google.generativeai.protos.Chunk.State.__rmod__": true,
+ "google.generativeai.protos.Chunk.State.__rmul__": true,
+ "google.generativeai.protos.Chunk.State.__ror__": true,
+ "google.generativeai.protos.Chunk.State.__rpow__": true,
+ "google.generativeai.protos.Chunk.State.__rrshift__": true,
+ "google.generativeai.protos.Chunk.State.__rshift__": true,
+ "google.generativeai.protos.Chunk.State.__rsub__": true,
+ "google.generativeai.protos.Chunk.State.__rtruediv__": true,
+ "google.generativeai.protos.Chunk.State.__rxor__": true,
+ "google.generativeai.protos.Chunk.State.__sub__": true,
+ "google.generativeai.protos.Chunk.State.__truediv__": true,
+ "google.generativeai.protos.Chunk.State.__xor__": true,
+ "google.generativeai.protos.Chunk.State.as_integer_ratio": true,
+ "google.generativeai.protos.Chunk.State.bit_count": true,
+ "google.generativeai.protos.Chunk.State.bit_length": true,
+ "google.generativeai.protos.Chunk.State.conjugate": true,
+ "google.generativeai.protos.Chunk.State.denominator": true,
+ "google.generativeai.protos.Chunk.State.from_bytes": true,
+ "google.generativeai.protos.Chunk.State.imag": true,
+ "google.generativeai.protos.Chunk.State.is_integer": true,
+ "google.generativeai.protos.Chunk.State.numerator": true,
+ "google.generativeai.protos.Chunk.State.real": true,
+ "google.generativeai.protos.Chunk.State.to_bytes": true,
+ "google.generativeai.protos.Chunk.__call__": true,
+ "google.generativeai.protos.Chunk.__eq__": true,
+ "google.generativeai.protos.Chunk.__ge__": true,
+ "google.generativeai.protos.Chunk.__gt__": true,
+ "google.generativeai.protos.Chunk.__init__": true,
+ "google.generativeai.protos.Chunk.__le__": true,
+ "google.generativeai.protos.Chunk.__lt__": true,
+ "google.generativeai.protos.Chunk.__ne__": true,
+ "google.generativeai.protos.Chunk.__new__": true,
+ "google.generativeai.protos.Chunk.__or__": true,
+ "google.generativeai.protos.Chunk.__ror__": true,
+ "google.generativeai.protos.Chunk.copy_from": true,
+ "google.generativeai.protos.Chunk.create_time": true,
+ "google.generativeai.protos.Chunk.custom_metadata": true,
+ "google.generativeai.protos.Chunk.data": true,
+ "google.generativeai.protos.Chunk.deserialize": true,
+ "google.generativeai.protos.Chunk.from_json": true,
+ "google.generativeai.protos.Chunk.mro": true,
+ "google.generativeai.protos.Chunk.name": true,
+ "google.generativeai.protos.Chunk.pb": true,
+ "google.generativeai.protos.Chunk.serialize": true,
+ "google.generativeai.protos.Chunk.state": true,
+ "google.generativeai.protos.Chunk.to_dict": true,
+ "google.generativeai.protos.Chunk.to_json": true,
+ "google.generativeai.protos.Chunk.update_time": true,
+ "google.generativeai.protos.Chunk.wrap": true,
+ "google.generativeai.protos.ChunkData": false,
+ "google.generativeai.protos.ChunkData.__call__": true,
+ "google.generativeai.protos.ChunkData.__eq__": true,
+ "google.generativeai.protos.ChunkData.__ge__": true,
+ "google.generativeai.protos.ChunkData.__gt__": true,
+ "google.generativeai.protos.ChunkData.__init__": true,
+ "google.generativeai.protos.ChunkData.__le__": true,
+ "google.generativeai.protos.ChunkData.__lt__": true,
+ "google.generativeai.protos.ChunkData.__ne__": true,
+ "google.generativeai.protos.ChunkData.__new__": true,
+ "google.generativeai.protos.ChunkData.__or__": true,
+ "google.generativeai.protos.ChunkData.__ror__": true,
+ "google.generativeai.protos.ChunkData.copy_from": true,
+ "google.generativeai.protos.ChunkData.deserialize": true,
+ "google.generativeai.protos.ChunkData.from_json": true,
+ "google.generativeai.protos.ChunkData.mro": true,
+ "google.generativeai.protos.ChunkData.pb": true,
+ "google.generativeai.protos.ChunkData.serialize": true,
+ "google.generativeai.protos.ChunkData.string_value": true,
+ "google.generativeai.protos.ChunkData.to_dict": true,
+ "google.generativeai.protos.ChunkData.to_json": true,
+ "google.generativeai.protos.ChunkData.wrap": true,
+ "google.generativeai.protos.CitationMetadata": false,
+ "google.generativeai.protos.CitationMetadata.__call__": true,
+ "google.generativeai.protos.CitationMetadata.__eq__": true,
+ "google.generativeai.protos.CitationMetadata.__ge__": true,
+ "google.generativeai.protos.CitationMetadata.__gt__": true,
+ "google.generativeai.protos.CitationMetadata.__init__": true,
+ "google.generativeai.protos.CitationMetadata.__le__": true,
+ "google.generativeai.protos.CitationMetadata.__lt__": true,
+ "google.generativeai.protos.CitationMetadata.__ne__": true,
+ "google.generativeai.protos.CitationMetadata.__new__": true,
+ "google.generativeai.protos.CitationMetadata.__or__": true,
+ "google.generativeai.protos.CitationMetadata.__ror__": true,
+ "google.generativeai.protos.CitationMetadata.citation_sources": true,
+ "google.generativeai.protos.CitationMetadata.copy_from": true,
+ "google.generativeai.protos.CitationMetadata.deserialize": true,
+ "google.generativeai.protos.CitationMetadata.from_json": true,
+ "google.generativeai.protos.CitationMetadata.mro": true,
+ "google.generativeai.protos.CitationMetadata.pb": true,
+ "google.generativeai.protos.CitationMetadata.serialize": true,
+ "google.generativeai.protos.CitationMetadata.to_dict": true,
+ "google.generativeai.protos.CitationMetadata.to_json": true,
+ "google.generativeai.protos.CitationMetadata.wrap": true,
+ "google.generativeai.protos.CitationSource": false,
+ "google.generativeai.protos.CitationSource.__call__": true,
+ "google.generativeai.protos.CitationSource.__eq__": true,
+ "google.generativeai.protos.CitationSource.__ge__": true,
+ "google.generativeai.protos.CitationSource.__gt__": true,
+ "google.generativeai.protos.CitationSource.__init__": true,
+ "google.generativeai.protos.CitationSource.__le__": true,
+ "google.generativeai.protos.CitationSource.__lt__": true,
+ "google.generativeai.protos.CitationSource.__ne__": true,
+ "google.generativeai.protos.CitationSource.__new__": true,
+ "google.generativeai.protos.CitationSource.__or__": true,
+ "google.generativeai.protos.CitationSource.__ror__": true,
+ "google.generativeai.protos.CitationSource.copy_from": true,
+ "google.generativeai.protos.CitationSource.deserialize": true,
+ "google.generativeai.protos.CitationSource.end_index": true,
+ "google.generativeai.protos.CitationSource.from_json": true,
+ "google.generativeai.protos.CitationSource.license_": true,
+ "google.generativeai.protos.CitationSource.mro": true,
+ "google.generativeai.protos.CitationSource.pb": true,
+ "google.generativeai.protos.CitationSource.serialize": true,
+ "google.generativeai.protos.CitationSource.start_index": true,
+ "google.generativeai.protos.CitationSource.to_dict": true,
+ "google.generativeai.protos.CitationSource.to_json": true,
+ "google.generativeai.protos.CitationSource.uri": true,
+ "google.generativeai.protos.CitationSource.wrap": true,
+ "google.generativeai.protos.CodeExecution": false,
+ "google.generativeai.protos.CodeExecution.__call__": true,
+ "google.generativeai.protos.CodeExecution.__eq__": true,
+ "google.generativeai.protos.CodeExecution.__ge__": true,
+ "google.generativeai.protos.CodeExecution.__gt__": true,
+ "google.generativeai.protos.CodeExecution.__init__": true,
+ "google.generativeai.protos.CodeExecution.__le__": true,
+ "google.generativeai.protos.CodeExecution.__lt__": true,
+ "google.generativeai.protos.CodeExecution.__ne__": true,
+ "google.generativeai.protos.CodeExecution.__new__": true,
+ "google.generativeai.protos.CodeExecution.__or__": true,
+ "google.generativeai.protos.CodeExecution.__ror__": true,
+ "google.generativeai.protos.CodeExecution.copy_from": true,
+ "google.generativeai.protos.CodeExecution.deserialize": true,
+ "google.generativeai.protos.CodeExecution.from_json": true,
+ "google.generativeai.protos.CodeExecution.mro": true,
+ "google.generativeai.protos.CodeExecution.pb": true,
+ "google.generativeai.protos.CodeExecution.serialize": true,
+ "google.generativeai.protos.CodeExecution.to_dict": true,
+ "google.generativeai.protos.CodeExecution.to_json": true,
+ "google.generativeai.protos.CodeExecution.wrap": true,
+ "google.generativeai.protos.CodeExecutionResult": false,
+ "google.generativeai.protos.CodeExecutionResult.Outcome": false,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_DEADLINE_EXCEEDED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_FAILED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_OK": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.OUTCOME_UNSPECIFIED": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__abs__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__add__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__and__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__bool__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__eq__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__floordiv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ge__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__gt__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__init__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__invert__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__le__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__lt__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mod__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__mul__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ne__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__neg__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__new__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__or__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pos__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__pow__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__radd__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rand__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rfloordiv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rlshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmod__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rmul__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__ror__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rpow__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rrshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rshift__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rsub__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rtruediv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__rxor__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__sub__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__truediv__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__xor__": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.as_integer_ratio": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_count": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.bit_length": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.conjugate": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.denominator": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.from_bytes": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.imag": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.is_integer": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.numerator": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.real": true,
+ "google.generativeai.protos.CodeExecutionResult.Outcome.to_bytes": true,
+ "google.generativeai.protos.CodeExecutionResult.__call__": true,
+ "google.generativeai.protos.CodeExecutionResult.__eq__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ge__": true,
+ "google.generativeai.protos.CodeExecutionResult.__gt__": true,
+ "google.generativeai.protos.CodeExecutionResult.__init__": true,
+ "google.generativeai.protos.CodeExecutionResult.__le__": true,
+ "google.generativeai.protos.CodeExecutionResult.__lt__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ne__": true,
+ "google.generativeai.protos.CodeExecutionResult.__new__": true,
+ "google.generativeai.protos.CodeExecutionResult.__or__": true,
+ "google.generativeai.protos.CodeExecutionResult.__ror__": true,
+ "google.generativeai.protos.CodeExecutionResult.copy_from": true,
+ "google.generativeai.protos.CodeExecutionResult.deserialize": true,
+ "google.generativeai.protos.CodeExecutionResult.from_json": true,
+ "google.generativeai.protos.CodeExecutionResult.mro": true,
+ "google.generativeai.protos.CodeExecutionResult.outcome": true,
+ "google.generativeai.protos.CodeExecutionResult.output": true,
+ "google.generativeai.protos.CodeExecutionResult.pb": true,
+ "google.generativeai.protos.CodeExecutionResult.serialize": true,
+ "google.generativeai.protos.CodeExecutionResult.to_dict": true,
+ "google.generativeai.protos.CodeExecutionResult.to_json": true,
+ "google.generativeai.protos.CodeExecutionResult.wrap": true,
+ "google.generativeai.protos.Condition": false,
+ "google.generativeai.protos.Condition.Operator": false,
+ "google.generativeai.protos.Condition.Operator.EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.EXCLUDES": true,
+ "google.generativeai.protos.Condition.Operator.GREATER": true,
+ "google.generativeai.protos.Condition.Operator.GREATER_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.INCLUDES": true,
+ "google.generativeai.protos.Condition.Operator.LESS": true,
+ "google.generativeai.protos.Condition.Operator.LESS_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.NOT_EQUAL": true,
+ "google.generativeai.protos.Condition.Operator.OPERATOR_UNSPECIFIED": true,
+ "google.generativeai.protos.Condition.Operator.__abs__": true,
+ "google.generativeai.protos.Condition.Operator.__add__": true,
+ "google.generativeai.protos.Condition.Operator.__and__": true,
+ "google.generativeai.protos.Condition.Operator.__bool__": true,
+ "google.generativeai.protos.Condition.Operator.__contains__": true,
+ "google.generativeai.protos.Condition.Operator.__eq__": true,
+ "google.generativeai.protos.Condition.Operator.__floordiv__": true,
+ "google.generativeai.protos.Condition.Operator.__ge__": true,
+ "google.generativeai.protos.Condition.Operator.__getitem__": true,
+ "google.generativeai.protos.Condition.Operator.__gt__": true,
+ "google.generativeai.protos.Condition.Operator.__init__": true,
+ "google.generativeai.protos.Condition.Operator.__invert__": true,
+ "google.generativeai.protos.Condition.Operator.__iter__": true,
+ "google.generativeai.protos.Condition.Operator.__le__": true,
+ "google.generativeai.protos.Condition.Operator.__len__": true,
+ "google.generativeai.protos.Condition.Operator.__lshift__": true,
+ "google.generativeai.protos.Condition.Operator.__lt__": true,
+ "google.generativeai.protos.Condition.Operator.__mod__": true,
+ "google.generativeai.protos.Condition.Operator.__mul__": true,
+ "google.generativeai.protos.Condition.Operator.__ne__": true,
+ "google.generativeai.protos.Condition.Operator.__neg__": true,
+ "google.generativeai.protos.Condition.Operator.__new__": true,
+ "google.generativeai.protos.Condition.Operator.__or__": true,
+ "google.generativeai.protos.Condition.Operator.__pos__": true,
+ "google.generativeai.protos.Condition.Operator.__pow__": true,
+ "google.generativeai.protos.Condition.Operator.__radd__": true,
+ "google.generativeai.protos.Condition.Operator.__rand__": true,
+ "google.generativeai.protos.Condition.Operator.__rfloordiv__": true,
+ "google.generativeai.protos.Condition.Operator.__rlshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rmod__": true,
+ "google.generativeai.protos.Condition.Operator.__rmul__": true,
+ "google.generativeai.protos.Condition.Operator.__ror__": true,
+ "google.generativeai.protos.Condition.Operator.__rpow__": true,
+ "google.generativeai.protos.Condition.Operator.__rrshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rshift__": true,
+ "google.generativeai.protos.Condition.Operator.__rsub__": true,
+ "google.generativeai.protos.Condition.Operator.__rtruediv__": true,
+ "google.generativeai.protos.Condition.Operator.__rxor__": true,
+ "google.generativeai.protos.Condition.Operator.__sub__": true,
+ "google.generativeai.protos.Condition.Operator.__truediv__": true,
+ "google.generativeai.protos.Condition.Operator.__xor__": true,
+ "google.generativeai.protos.Condition.Operator.as_integer_ratio": true,
+ "google.generativeai.protos.Condition.Operator.bit_count": true,
+ "google.generativeai.protos.Condition.Operator.bit_length": true,
+ "google.generativeai.protos.Condition.Operator.conjugate": true,
+ "google.generativeai.protos.Condition.Operator.denominator": true,
+ "google.generativeai.protos.Condition.Operator.from_bytes": true,
+ "google.generativeai.protos.Condition.Operator.imag": true,
+ "google.generativeai.protos.Condition.Operator.is_integer": true,
+ "google.generativeai.protos.Condition.Operator.numerator": true,
+ "google.generativeai.protos.Condition.Operator.real": true,
+ "google.generativeai.protos.Condition.Operator.to_bytes": true,
+ "google.generativeai.protos.Condition.__call__": true,
+ "google.generativeai.protos.Condition.__eq__": true,
+ "google.generativeai.protos.Condition.__ge__": true,
+ "google.generativeai.protos.Condition.__gt__": true,
+ "google.generativeai.protos.Condition.__init__": true,
+ "google.generativeai.protos.Condition.__le__": true,
+ "google.generativeai.protos.Condition.__lt__": true,
+ "google.generativeai.protos.Condition.__ne__": true,
+ "google.generativeai.protos.Condition.__new__": true,
+ "google.generativeai.protos.Condition.__or__": true,
+ "google.generativeai.protos.Condition.__ror__": true,
+ "google.generativeai.protos.Condition.copy_from": true,
+ "google.generativeai.protos.Condition.deserialize": true,
+ "google.generativeai.protos.Condition.from_json": true,
+ "google.generativeai.protos.Condition.mro": true,
+ "google.generativeai.protos.Condition.numeric_value": true,
+ "google.generativeai.protos.Condition.operation": true,
+ "google.generativeai.protos.Condition.pb": true,
+ "google.generativeai.protos.Condition.serialize": true,
+ "google.generativeai.protos.Condition.string_value": true,
+ "google.generativeai.protos.Condition.to_dict": true,
+ "google.generativeai.protos.Condition.to_json": true,
+ "google.generativeai.protos.Condition.wrap": true,
+ "google.generativeai.protos.Content": false,
+ "google.generativeai.protos.Content.__call__": true,
+ "google.generativeai.protos.Content.__eq__": true,
+ "google.generativeai.protos.Content.__ge__": true,
+ "google.generativeai.protos.Content.__gt__": true,
+ "google.generativeai.protos.Content.__init__": true,
+ "google.generativeai.protos.Content.__le__": true,
+ "google.generativeai.protos.Content.__lt__": true,
+ "google.generativeai.protos.Content.__ne__": true,
+ "google.generativeai.protos.Content.__new__": true,
+ "google.generativeai.protos.Content.__or__": true,
+ "google.generativeai.protos.Content.__ror__": true,
+ "google.generativeai.protos.Content.copy_from": true,
+ "google.generativeai.protos.Content.deserialize": true,
+ "google.generativeai.protos.Content.from_json": true,
+ "google.generativeai.protos.Content.mro": true,
+ "google.generativeai.protos.Content.parts": true,
+ "google.generativeai.protos.Content.pb": true,
+ "google.generativeai.protos.Content.role": true,
+ "google.generativeai.protos.Content.serialize": true,
+ "google.generativeai.protos.Content.to_dict": true,
+ "google.generativeai.protos.Content.to_json": true,
+ "google.generativeai.protos.Content.wrap": true,
+ "google.generativeai.protos.ContentEmbedding": false,
+ "google.generativeai.protos.ContentEmbedding.__call__": true,
+ "google.generativeai.protos.ContentEmbedding.__eq__": true,
+ "google.generativeai.protos.ContentEmbedding.__ge__": true,
+ "google.generativeai.protos.ContentEmbedding.__gt__": true,
+ "google.generativeai.protos.ContentEmbedding.__init__": true,
+ "google.generativeai.protos.ContentEmbedding.__le__": true,
+ "google.generativeai.protos.ContentEmbedding.__lt__": true,
+ "google.generativeai.protos.ContentEmbedding.__ne__": true,
+ "google.generativeai.protos.ContentEmbedding.__new__": true,
+ "google.generativeai.protos.ContentEmbedding.__or__": true,
+ "google.generativeai.protos.ContentEmbedding.__ror__": true,
+ "google.generativeai.protos.ContentEmbedding.copy_from": true,
+ "google.generativeai.protos.ContentEmbedding.deserialize": true,
+ "google.generativeai.protos.ContentEmbedding.from_json": true,
+ "google.generativeai.protos.ContentEmbedding.mro": true,
+ "google.generativeai.protos.ContentEmbedding.pb": true,
+ "google.generativeai.protos.ContentEmbedding.serialize": true,
+ "google.generativeai.protos.ContentEmbedding.to_dict": true,
+ "google.generativeai.protos.ContentEmbedding.to_json": true,
+ "google.generativeai.protos.ContentEmbedding.values": true,
+ "google.generativeai.protos.ContentEmbedding.wrap": true,
+ "google.generativeai.protos.ContentFilter": false,
+ "google.generativeai.protos.ContentFilter.BlockedReason": false,
+ "google.generativeai.protos.ContentFilter.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.OTHER": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.SAFETY": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__abs__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__add__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__and__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__bool__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__contains__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__eq__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__floordiv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ge__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__getitem__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__gt__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__init__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__invert__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__iter__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__le__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__len__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__lt__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mod__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__mul__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ne__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__neg__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__new__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__or__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pos__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__pow__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__radd__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rand__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rfloordiv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rlshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmod__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rmul__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__ror__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rpow__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rrshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rshift__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rsub__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rtruediv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__rxor__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__sub__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__truediv__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.__xor__": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.as_integer_ratio": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_count": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.bit_length": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.conjugate": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.denominator": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.from_bytes": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.imag": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.is_integer": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.numerator": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.real": true,
+ "google.generativeai.protos.ContentFilter.BlockedReason.to_bytes": true,
+ "google.generativeai.protos.ContentFilter.__call__": true,
+ "google.generativeai.protos.ContentFilter.__eq__": true,
+ "google.generativeai.protos.ContentFilter.__ge__": true,
+ "google.generativeai.protos.ContentFilter.__gt__": true,
+ "google.generativeai.protos.ContentFilter.__init__": true,
+ "google.generativeai.protos.ContentFilter.__le__": true,
+ "google.generativeai.protos.ContentFilter.__lt__": true,
+ "google.generativeai.protos.ContentFilter.__ne__": true,
+ "google.generativeai.protos.ContentFilter.__new__": true,
+ "google.generativeai.protos.ContentFilter.__or__": true,
+ "google.generativeai.protos.ContentFilter.__ror__": true,
+ "google.generativeai.protos.ContentFilter.copy_from": true,
+ "google.generativeai.protos.ContentFilter.deserialize": true,
+ "google.generativeai.protos.ContentFilter.from_json": true,
+ "google.generativeai.protos.ContentFilter.message": true,
+ "google.generativeai.protos.ContentFilter.mro": true,
+ "google.generativeai.protos.ContentFilter.pb": true,
+ "google.generativeai.protos.ContentFilter.reason": true,
+ "google.generativeai.protos.ContentFilter.serialize": true,
+ "google.generativeai.protos.ContentFilter.to_dict": true,
+ "google.generativeai.protos.ContentFilter.to_json": true,
+ "google.generativeai.protos.ContentFilter.wrap": true,
+ "google.generativeai.protos.Corpus": false,
+ "google.generativeai.protos.Corpus.__call__": true,
+ "google.generativeai.protos.Corpus.__eq__": true,
+ "google.generativeai.protos.Corpus.__ge__": true,
+ "google.generativeai.protos.Corpus.__gt__": true,
+ "google.generativeai.protos.Corpus.__init__": true,
+ "google.generativeai.protos.Corpus.__le__": true,
+ "google.generativeai.protos.Corpus.__lt__": true,
+ "google.generativeai.protos.Corpus.__ne__": true,
+ "google.generativeai.protos.Corpus.__new__": true,
+ "google.generativeai.protos.Corpus.__or__": true,
+ "google.generativeai.protos.Corpus.__ror__": true,
+ "google.generativeai.protos.Corpus.copy_from": true,
+ "google.generativeai.protos.Corpus.create_time": true,
+ "google.generativeai.protos.Corpus.deserialize": true,
+ "google.generativeai.protos.Corpus.display_name": true,
+ "google.generativeai.protos.Corpus.from_json": true,
+ "google.generativeai.protos.Corpus.mro": true,
+ "google.generativeai.protos.Corpus.name": true,
+ "google.generativeai.protos.Corpus.pb": true,
+ "google.generativeai.protos.Corpus.serialize": true,
+ "google.generativeai.protos.Corpus.to_dict": true,
+ "google.generativeai.protos.Corpus.to_json": true,
+ "google.generativeai.protos.Corpus.update_time": true,
+ "google.generativeai.protos.Corpus.wrap": true,
+ "google.generativeai.protos.CountMessageTokensRequest": false,
+ "google.generativeai.protos.CountMessageTokensRequest.__call__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__init__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__le__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__new__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__or__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountMessageTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountMessageTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountMessageTokensRequest.from_json": true,
+ "google.generativeai.protos.CountMessageTokensRequest.model": true,
+ "google.generativeai.protos.CountMessageTokensRequest.mro": true,
+ "google.generativeai.protos.CountMessageTokensRequest.pb": true,
+ "google.generativeai.protos.CountMessageTokensRequest.prompt": true,
+ "google.generativeai.protos.CountMessageTokensRequest.serialize": true,
+ "google.generativeai.protos.CountMessageTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountMessageTokensRequest.to_json": true,
+ "google.generativeai.protos.CountMessageTokensRequest.wrap": true,
+ "google.generativeai.protos.CountMessageTokensResponse": false,
+ "google.generativeai.protos.CountMessageTokensResponse.__call__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__init__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__le__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__new__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__or__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountMessageTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountMessageTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountMessageTokensResponse.from_json": true,
+ "google.generativeai.protos.CountMessageTokensResponse.mro": true,
+ "google.generativeai.protos.CountMessageTokensResponse.pb": true,
+ "google.generativeai.protos.CountMessageTokensResponse.serialize": true,
+ "google.generativeai.protos.CountMessageTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountMessageTokensResponse.to_json": true,
+ "google.generativeai.protos.CountMessageTokensResponse.token_count": true,
+ "google.generativeai.protos.CountMessageTokensResponse.wrap": true,
+ "google.generativeai.protos.CountTextTokensRequest": false,
+ "google.generativeai.protos.CountTextTokensRequest.__call__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__init__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__le__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__new__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__or__": true,
+ "google.generativeai.protos.CountTextTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountTextTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountTextTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountTextTokensRequest.from_json": true,
+ "google.generativeai.protos.CountTextTokensRequest.model": true,
+ "google.generativeai.protos.CountTextTokensRequest.mro": true,
+ "google.generativeai.protos.CountTextTokensRequest.pb": true,
+ "google.generativeai.protos.CountTextTokensRequest.prompt": true,
+ "google.generativeai.protos.CountTextTokensRequest.serialize": true,
+ "google.generativeai.protos.CountTextTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountTextTokensRequest.to_json": true,
+ "google.generativeai.protos.CountTextTokensRequest.wrap": true,
+ "google.generativeai.protos.CountTextTokensResponse": false,
+ "google.generativeai.protos.CountTextTokensResponse.__call__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__init__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__le__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__new__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__or__": true,
+ "google.generativeai.protos.CountTextTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountTextTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountTextTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountTextTokensResponse.from_json": true,
+ "google.generativeai.protos.CountTextTokensResponse.mro": true,
+ "google.generativeai.protos.CountTextTokensResponse.pb": true,
+ "google.generativeai.protos.CountTextTokensResponse.serialize": true,
+ "google.generativeai.protos.CountTextTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountTextTokensResponse.to_json": true,
+ "google.generativeai.protos.CountTextTokensResponse.token_count": true,
+ "google.generativeai.protos.CountTextTokensResponse.wrap": true,
+ "google.generativeai.protos.CountTokensRequest": false,
+ "google.generativeai.protos.CountTokensRequest.__call__": true,
+ "google.generativeai.protos.CountTokensRequest.__eq__": true,
+ "google.generativeai.protos.CountTokensRequest.__ge__": true,
+ "google.generativeai.protos.CountTokensRequest.__gt__": true,
+ "google.generativeai.protos.CountTokensRequest.__init__": true,
+ "google.generativeai.protos.CountTokensRequest.__le__": true,
+ "google.generativeai.protos.CountTokensRequest.__lt__": true,
+ "google.generativeai.protos.CountTokensRequest.__ne__": true,
+ "google.generativeai.protos.CountTokensRequest.__new__": true,
+ "google.generativeai.protos.CountTokensRequest.__or__": true,
+ "google.generativeai.protos.CountTokensRequest.__ror__": true,
+ "google.generativeai.protos.CountTokensRequest.contents": true,
+ "google.generativeai.protos.CountTokensRequest.copy_from": true,
+ "google.generativeai.protos.CountTokensRequest.deserialize": true,
+ "google.generativeai.protos.CountTokensRequest.from_json": true,
+ "google.generativeai.protos.CountTokensRequest.generate_content_request": true,
+ "google.generativeai.protos.CountTokensRequest.model": true,
+ "google.generativeai.protos.CountTokensRequest.mro": true,
+ "google.generativeai.protos.CountTokensRequest.pb": true,
+ "google.generativeai.protos.CountTokensRequest.serialize": true,
+ "google.generativeai.protos.CountTokensRequest.to_dict": true,
+ "google.generativeai.protos.CountTokensRequest.to_json": true,
+ "google.generativeai.protos.CountTokensRequest.wrap": true,
+ "google.generativeai.protos.CountTokensResponse": false,
+ "google.generativeai.protos.CountTokensResponse.__call__": true,
+ "google.generativeai.protos.CountTokensResponse.__eq__": true,
+ "google.generativeai.protos.CountTokensResponse.__ge__": true,
+ "google.generativeai.protos.CountTokensResponse.__gt__": true,
+ "google.generativeai.protos.CountTokensResponse.__init__": true,
+ "google.generativeai.protos.CountTokensResponse.__le__": true,
+ "google.generativeai.protos.CountTokensResponse.__lt__": true,
+ "google.generativeai.protos.CountTokensResponse.__ne__": true,
+ "google.generativeai.protos.CountTokensResponse.__new__": true,
+ "google.generativeai.protos.CountTokensResponse.__or__": true,
+ "google.generativeai.protos.CountTokensResponse.__ror__": true,
+ "google.generativeai.protos.CountTokensResponse.cached_content_token_count": true,
+ "google.generativeai.protos.CountTokensResponse.copy_from": true,
+ "google.generativeai.protos.CountTokensResponse.deserialize": true,
+ "google.generativeai.protos.CountTokensResponse.from_json": true,
+ "google.generativeai.protos.CountTokensResponse.mro": true,
+ "google.generativeai.protos.CountTokensResponse.pb": true,
+ "google.generativeai.protos.CountTokensResponse.serialize": true,
+ "google.generativeai.protos.CountTokensResponse.to_dict": true,
+ "google.generativeai.protos.CountTokensResponse.to_json": true,
+ "google.generativeai.protos.CountTokensResponse.total_tokens": true,
+ "google.generativeai.protos.CountTokensResponse.wrap": true,
+ "google.generativeai.protos.CreateCachedContentRequest": false,
+ "google.generativeai.protos.CreateCachedContentRequest.__call__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__init__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__le__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__new__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__or__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.CreateCachedContentRequest.cached_content": true,
+ "google.generativeai.protos.CreateCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.CreateCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.CreateCachedContentRequest.from_json": true,
+ "google.generativeai.protos.CreateCachedContentRequest.mro": true,
+ "google.generativeai.protos.CreateCachedContentRequest.pb": true,
+ "google.generativeai.protos.CreateCachedContentRequest.serialize": true,
+ "google.generativeai.protos.CreateCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.CreateCachedContentRequest.to_json": true,
+ "google.generativeai.protos.CreateCachedContentRequest.wrap": true,
+ "google.generativeai.protos.CreateChunkRequest": false,
+ "google.generativeai.protos.CreateChunkRequest.__call__": true,
+ "google.generativeai.protos.CreateChunkRequest.__eq__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ge__": true,
+ "google.generativeai.protos.CreateChunkRequest.__gt__": true,
+ "google.generativeai.protos.CreateChunkRequest.__init__": true,
+ "google.generativeai.protos.CreateChunkRequest.__le__": true,
+ "google.generativeai.protos.CreateChunkRequest.__lt__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ne__": true,
+ "google.generativeai.protos.CreateChunkRequest.__new__": true,
+ "google.generativeai.protos.CreateChunkRequest.__or__": true,
+ "google.generativeai.protos.CreateChunkRequest.__ror__": true,
+ "google.generativeai.protos.CreateChunkRequest.chunk": true,
+ "google.generativeai.protos.CreateChunkRequest.copy_from": true,
+ "google.generativeai.protos.CreateChunkRequest.deserialize": true,
+ "google.generativeai.protos.CreateChunkRequest.from_json": true,
+ "google.generativeai.protos.CreateChunkRequest.mro": true,
+ "google.generativeai.protos.CreateChunkRequest.parent": true,
+ "google.generativeai.protos.CreateChunkRequest.pb": true,
+ "google.generativeai.protos.CreateChunkRequest.serialize": true,
+ "google.generativeai.protos.CreateChunkRequest.to_dict": true,
+ "google.generativeai.protos.CreateChunkRequest.to_json": true,
+ "google.generativeai.protos.CreateChunkRequest.wrap": true,
+ "google.generativeai.protos.CreateCorpusRequest": false,
+ "google.generativeai.protos.CreateCorpusRequest.__call__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__eq__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ge__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__gt__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__init__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__le__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__lt__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ne__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__new__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__or__": true,
+ "google.generativeai.protos.CreateCorpusRequest.__ror__": true,
+ "google.generativeai.protos.CreateCorpusRequest.copy_from": true,
+ "google.generativeai.protos.CreateCorpusRequest.corpus": true,
+ "google.generativeai.protos.CreateCorpusRequest.deserialize": true,
+ "google.generativeai.protos.CreateCorpusRequest.from_json": true,
+ "google.generativeai.protos.CreateCorpusRequest.mro": true,
+ "google.generativeai.protos.CreateCorpusRequest.pb": true,
+ "google.generativeai.protos.CreateCorpusRequest.serialize": true,
+ "google.generativeai.protos.CreateCorpusRequest.to_dict": true,
+ "google.generativeai.protos.CreateCorpusRequest.to_json": true,
+ "google.generativeai.protos.CreateCorpusRequest.wrap": true,
+ "google.generativeai.protos.CreateDocumentRequest": false,
+ "google.generativeai.protos.CreateDocumentRequest.__call__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__eq__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ge__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__gt__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__init__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__le__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__lt__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ne__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__new__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__or__": true,
+ "google.generativeai.protos.CreateDocumentRequest.__ror__": true,
+ "google.generativeai.protos.CreateDocumentRequest.copy_from": true,
+ "google.generativeai.protos.CreateDocumentRequest.deserialize": true,
+ "google.generativeai.protos.CreateDocumentRequest.document": true,
+ "google.generativeai.protos.CreateDocumentRequest.from_json": true,
+ "google.generativeai.protos.CreateDocumentRequest.mro": true,
+ "google.generativeai.protos.CreateDocumentRequest.parent": true,
+ "google.generativeai.protos.CreateDocumentRequest.pb": true,
+ "google.generativeai.protos.CreateDocumentRequest.serialize": true,
+ "google.generativeai.protos.CreateDocumentRequest.to_dict": true,
+ "google.generativeai.protos.CreateDocumentRequest.to_json": true,
+ "google.generativeai.protos.CreateDocumentRequest.wrap": true,
+ "google.generativeai.protos.CreateFileRequest": false,
+ "google.generativeai.protos.CreateFileRequest.__call__": true,
+ "google.generativeai.protos.CreateFileRequest.__eq__": true,
+ "google.generativeai.protos.CreateFileRequest.__ge__": true,
+ "google.generativeai.protos.CreateFileRequest.__gt__": true,
+ "google.generativeai.protos.CreateFileRequest.__init__": true,
+ "google.generativeai.protos.CreateFileRequest.__le__": true,
+ "google.generativeai.protos.CreateFileRequest.__lt__": true,
+ "google.generativeai.protos.CreateFileRequest.__ne__": true,
+ "google.generativeai.protos.CreateFileRequest.__new__": true,
+ "google.generativeai.protos.CreateFileRequest.__or__": true,
+ "google.generativeai.protos.CreateFileRequest.__ror__": true,
+ "google.generativeai.protos.CreateFileRequest.copy_from": true,
+ "google.generativeai.protos.CreateFileRequest.deserialize": true,
+ "google.generativeai.protos.CreateFileRequest.file": true,
+ "google.generativeai.protos.CreateFileRequest.from_json": true,
+ "google.generativeai.protos.CreateFileRequest.mro": true,
+ "google.generativeai.protos.CreateFileRequest.pb": true,
+ "google.generativeai.protos.CreateFileRequest.serialize": true,
+ "google.generativeai.protos.CreateFileRequest.to_dict": true,
+ "google.generativeai.protos.CreateFileRequest.to_json": true,
+ "google.generativeai.protos.CreateFileRequest.wrap": true,
+ "google.generativeai.protos.CreateFileResponse": false,
+ "google.generativeai.protos.CreateFileResponse.__call__": true,
+ "google.generativeai.protos.CreateFileResponse.__eq__": true,
+ "google.generativeai.protos.CreateFileResponse.__ge__": true,
+ "google.generativeai.protos.CreateFileResponse.__gt__": true,
+ "google.generativeai.protos.CreateFileResponse.__init__": true,
+ "google.generativeai.protos.CreateFileResponse.__le__": true,
+ "google.generativeai.protos.CreateFileResponse.__lt__": true,
+ "google.generativeai.protos.CreateFileResponse.__ne__": true,
+ "google.generativeai.protos.CreateFileResponse.__new__": true,
+ "google.generativeai.protos.CreateFileResponse.__or__": true,
+ "google.generativeai.protos.CreateFileResponse.__ror__": true,
+ "google.generativeai.protos.CreateFileResponse.copy_from": true,
+ "google.generativeai.protos.CreateFileResponse.deserialize": true,
+ "google.generativeai.protos.CreateFileResponse.file": true,
+ "google.generativeai.protos.CreateFileResponse.from_json": true,
+ "google.generativeai.protos.CreateFileResponse.mro": true,
+ "google.generativeai.protos.CreateFileResponse.pb": true,
+ "google.generativeai.protos.CreateFileResponse.serialize": true,
+ "google.generativeai.protos.CreateFileResponse.to_dict": true,
+ "google.generativeai.protos.CreateFileResponse.to_json": true,
+ "google.generativeai.protos.CreateFileResponse.wrap": true,
+ "google.generativeai.protos.CreatePermissionRequest": false,
+ "google.generativeai.protos.CreatePermissionRequest.__call__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__eq__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ge__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__gt__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__init__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__le__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__lt__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ne__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__new__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__or__": true,
+ "google.generativeai.protos.CreatePermissionRequest.__ror__": true,
+ "google.generativeai.protos.CreatePermissionRequest.copy_from": true,
+ "google.generativeai.protos.CreatePermissionRequest.deserialize": true,
+ "google.generativeai.protos.CreatePermissionRequest.from_json": true,
+ "google.generativeai.protos.CreatePermissionRequest.mro": true,
+ "google.generativeai.protos.CreatePermissionRequest.parent": true,
+ "google.generativeai.protos.CreatePermissionRequest.pb": true,
+ "google.generativeai.protos.CreatePermissionRequest.permission": true,
+ "google.generativeai.protos.CreatePermissionRequest.serialize": true,
+ "google.generativeai.protos.CreatePermissionRequest.to_dict": true,
+ "google.generativeai.protos.CreatePermissionRequest.to_json": true,
+ "google.generativeai.protos.CreatePermissionRequest.wrap": true,
+ "google.generativeai.protos.CreateTunedModelMetadata": false,
+ "google.generativeai.protos.CreateTunedModelMetadata.__call__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__eq__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ge__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__gt__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__init__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__le__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__lt__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ne__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__new__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__or__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.__ror__": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.completed_percent": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.completed_steps": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.copy_from": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.deserialize": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.from_json": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.mro": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.pb": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.serialize": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.snapshots": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.to_dict": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.to_json": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.total_steps": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.tuned_model": true,
+ "google.generativeai.protos.CreateTunedModelMetadata.wrap": true,
+ "google.generativeai.protos.CreateTunedModelRequest": false,
+ "google.generativeai.protos.CreateTunedModelRequest.__call__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__init__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__le__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__new__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__or__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.CreateTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.CreateTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.CreateTunedModelRequest.from_json": true,
+ "google.generativeai.protos.CreateTunedModelRequest.mro": true,
+ "google.generativeai.protos.CreateTunedModelRequest.pb": true,
+ "google.generativeai.protos.CreateTunedModelRequest.serialize": true,
+ "google.generativeai.protos.CreateTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.CreateTunedModelRequest.to_json": true,
+ "google.generativeai.protos.CreateTunedModelRequest.tuned_model": true,
+ "google.generativeai.protos.CreateTunedModelRequest.tuned_model_id": true,
+ "google.generativeai.protos.CreateTunedModelRequest.wrap": true,
+ "google.generativeai.protos.CustomMetadata": false,
+ "google.generativeai.protos.CustomMetadata.__call__": true,
+ "google.generativeai.protos.CustomMetadata.__eq__": true,
+ "google.generativeai.protos.CustomMetadata.__ge__": true,
+ "google.generativeai.protos.CustomMetadata.__gt__": true,
+ "google.generativeai.protos.CustomMetadata.__init__": true,
+ "google.generativeai.protos.CustomMetadata.__le__": true,
+ "google.generativeai.protos.CustomMetadata.__lt__": true,
+ "google.generativeai.protos.CustomMetadata.__ne__": true,
+ "google.generativeai.protos.CustomMetadata.__new__": true,
+ "google.generativeai.protos.CustomMetadata.__or__": true,
+ "google.generativeai.protos.CustomMetadata.__ror__": true,
+ "google.generativeai.protos.CustomMetadata.copy_from": true,
+ "google.generativeai.protos.CustomMetadata.deserialize": true,
+ "google.generativeai.protos.CustomMetadata.from_json": true,
+ "google.generativeai.protos.CustomMetadata.key": true,
+ "google.generativeai.protos.CustomMetadata.mro": true,
+ "google.generativeai.protos.CustomMetadata.numeric_value": true,
+ "google.generativeai.protos.CustomMetadata.pb": true,
+ "google.generativeai.protos.CustomMetadata.serialize": true,
+ "google.generativeai.protos.CustomMetadata.string_list_value": true,
+ "google.generativeai.protos.CustomMetadata.string_value": true,
+ "google.generativeai.protos.CustomMetadata.to_dict": true,
+ "google.generativeai.protos.CustomMetadata.to_json": true,
+ "google.generativeai.protos.CustomMetadata.wrap": true,
+ "google.generativeai.protos.Dataset": false,
+ "google.generativeai.protos.Dataset.__call__": true,
+ "google.generativeai.protos.Dataset.__eq__": true,
+ "google.generativeai.protos.Dataset.__ge__": true,
+ "google.generativeai.protos.Dataset.__gt__": true,
+ "google.generativeai.protos.Dataset.__init__": true,
+ "google.generativeai.protos.Dataset.__le__": true,
+ "google.generativeai.protos.Dataset.__lt__": true,
+ "google.generativeai.protos.Dataset.__ne__": true,
+ "google.generativeai.protos.Dataset.__new__": true,
+ "google.generativeai.protos.Dataset.__or__": true,
+ "google.generativeai.protos.Dataset.__ror__": true,
+ "google.generativeai.protos.Dataset.copy_from": true,
+ "google.generativeai.protos.Dataset.deserialize": true,
+ "google.generativeai.protos.Dataset.examples": true,
+ "google.generativeai.protos.Dataset.from_json": true,
+ "google.generativeai.protos.Dataset.mro": true,
+ "google.generativeai.protos.Dataset.pb": true,
+ "google.generativeai.protos.Dataset.serialize": true,
+ "google.generativeai.protos.Dataset.to_dict": true,
+ "google.generativeai.protos.Dataset.to_json": true,
+ "google.generativeai.protos.Dataset.wrap": true,
+ "google.generativeai.protos.DeleteCachedContentRequest": false,
+ "google.generativeai.protos.DeleteCachedContentRequest.__call__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__init__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__le__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__new__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__or__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.from_json": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.mro": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.name": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.pb": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.serialize": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.to_json": true,
+ "google.generativeai.protos.DeleteCachedContentRequest.wrap": true,
+ "google.generativeai.protos.DeleteChunkRequest": false,
+ "google.generativeai.protos.DeleteChunkRequest.__call__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__eq__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ge__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__gt__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__init__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__le__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__lt__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ne__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__new__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__or__": true,
+ "google.generativeai.protos.DeleteChunkRequest.__ror__": true,
+ "google.generativeai.protos.DeleteChunkRequest.copy_from": true,
+ "google.generativeai.protos.DeleteChunkRequest.deserialize": true,
+ "google.generativeai.protos.DeleteChunkRequest.from_json": true,
+ "google.generativeai.protos.DeleteChunkRequest.mro": true,
+ "google.generativeai.protos.DeleteChunkRequest.name": true,
+ "google.generativeai.protos.DeleteChunkRequest.pb": true,
+ "google.generativeai.protos.DeleteChunkRequest.serialize": true,
+ "google.generativeai.protos.DeleteChunkRequest.to_dict": true,
+ "google.generativeai.protos.DeleteChunkRequest.to_json": true,
+ "google.generativeai.protos.DeleteChunkRequest.wrap": true,
+ "google.generativeai.protos.DeleteCorpusRequest": false,
+ "google.generativeai.protos.DeleteCorpusRequest.__call__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__eq__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ge__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__gt__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__init__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__le__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__lt__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ne__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__new__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__or__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.__ror__": true,
+ "google.generativeai.protos.DeleteCorpusRequest.copy_from": true,
+ "google.generativeai.protos.DeleteCorpusRequest.deserialize": true,
+ "google.generativeai.protos.DeleteCorpusRequest.force": true,
+ "google.generativeai.protos.DeleteCorpusRequest.from_json": true,
+ "google.generativeai.protos.DeleteCorpusRequest.mro": true,
+ "google.generativeai.protos.DeleteCorpusRequest.name": true,
+ "google.generativeai.protos.DeleteCorpusRequest.pb": true,
+ "google.generativeai.protos.DeleteCorpusRequest.serialize": true,
+ "google.generativeai.protos.DeleteCorpusRequest.to_dict": true,
+ "google.generativeai.protos.DeleteCorpusRequest.to_json": true,
+ "google.generativeai.protos.DeleteCorpusRequest.wrap": true,
+ "google.generativeai.protos.DeleteDocumentRequest": false,
+ "google.generativeai.protos.DeleteDocumentRequest.__call__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__eq__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ge__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__gt__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__init__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__le__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__lt__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ne__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__new__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__or__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.__ror__": true,
+ "google.generativeai.protos.DeleteDocumentRequest.copy_from": true,
+ "google.generativeai.protos.DeleteDocumentRequest.deserialize": true,
+ "google.generativeai.protos.DeleteDocumentRequest.force": true,
+ "google.generativeai.protos.DeleteDocumentRequest.from_json": true,
+ "google.generativeai.protos.DeleteDocumentRequest.mro": true,
+ "google.generativeai.protos.DeleteDocumentRequest.name": true,
+ "google.generativeai.protos.DeleteDocumentRequest.pb": true,
+ "google.generativeai.protos.DeleteDocumentRequest.serialize": true,
+ "google.generativeai.protos.DeleteDocumentRequest.to_dict": true,
+ "google.generativeai.protos.DeleteDocumentRequest.to_json": true,
+ "google.generativeai.protos.DeleteDocumentRequest.wrap": true,
+ "google.generativeai.protos.DeleteFileRequest": false,
+ "google.generativeai.protos.DeleteFileRequest.__call__": true,
+ "google.generativeai.protos.DeleteFileRequest.__eq__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ge__": true,
+ "google.generativeai.protos.DeleteFileRequest.__gt__": true,
+ "google.generativeai.protos.DeleteFileRequest.__init__": true,
+ "google.generativeai.protos.DeleteFileRequest.__le__": true,
+ "google.generativeai.protos.DeleteFileRequest.__lt__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ne__": true,
+ "google.generativeai.protos.DeleteFileRequest.__new__": true,
+ "google.generativeai.protos.DeleteFileRequest.__or__": true,
+ "google.generativeai.protos.DeleteFileRequest.__ror__": true,
+ "google.generativeai.protos.DeleteFileRequest.copy_from": true,
+ "google.generativeai.protos.DeleteFileRequest.deserialize": true,
+ "google.generativeai.protos.DeleteFileRequest.from_json": true,
+ "google.generativeai.protos.DeleteFileRequest.mro": true,
+ "google.generativeai.protos.DeleteFileRequest.name": true,
+ "google.generativeai.protos.DeleteFileRequest.pb": true,
+ "google.generativeai.protos.DeleteFileRequest.serialize": true,
+ "google.generativeai.protos.DeleteFileRequest.to_dict": true,
+ "google.generativeai.protos.DeleteFileRequest.to_json": true,
+ "google.generativeai.protos.DeleteFileRequest.wrap": true,
+ "google.generativeai.protos.DeletePermissionRequest": false,
+ "google.generativeai.protos.DeletePermissionRequest.__call__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__eq__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ge__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__gt__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__init__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__le__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__lt__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ne__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__new__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__or__": true,
+ "google.generativeai.protos.DeletePermissionRequest.__ror__": true,
+ "google.generativeai.protos.DeletePermissionRequest.copy_from": true,
+ "google.generativeai.protos.DeletePermissionRequest.deserialize": true,
+ "google.generativeai.protos.DeletePermissionRequest.from_json": true,
+ "google.generativeai.protos.DeletePermissionRequest.mro": true,
+ "google.generativeai.protos.DeletePermissionRequest.name": true,
+ "google.generativeai.protos.DeletePermissionRequest.pb": true,
+ "google.generativeai.protos.DeletePermissionRequest.serialize": true,
+ "google.generativeai.protos.DeletePermissionRequest.to_dict": true,
+ "google.generativeai.protos.DeletePermissionRequest.to_json": true,
+ "google.generativeai.protos.DeletePermissionRequest.wrap": true,
+ "google.generativeai.protos.DeleteTunedModelRequest": false,
+ "google.generativeai.protos.DeleteTunedModelRequest.__call__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__init__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__le__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__new__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__or__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.from_json": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.mro": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.name": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.pb": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.serialize": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.to_json": true,
+ "google.generativeai.protos.DeleteTunedModelRequest.wrap": true,
+ "google.generativeai.protos.Document": false,
+ "google.generativeai.protos.Document.__call__": true,
+ "google.generativeai.protos.Document.__eq__": true,
+ "google.generativeai.protos.Document.__ge__": true,
+ "google.generativeai.protos.Document.__gt__": true,
+ "google.generativeai.protos.Document.__init__": true,
+ "google.generativeai.protos.Document.__le__": true,
+ "google.generativeai.protos.Document.__lt__": true,
+ "google.generativeai.protos.Document.__ne__": true,
+ "google.generativeai.protos.Document.__new__": true,
+ "google.generativeai.protos.Document.__or__": true,
+ "google.generativeai.protos.Document.__ror__": true,
+ "google.generativeai.protos.Document.copy_from": true,
+ "google.generativeai.protos.Document.create_time": true,
+ "google.generativeai.protos.Document.custom_metadata": true,
+ "google.generativeai.protos.Document.deserialize": true,
+ "google.generativeai.protos.Document.display_name": true,
+ "google.generativeai.protos.Document.from_json": true,
+ "google.generativeai.protos.Document.mro": true,
+ "google.generativeai.protos.Document.name": true,
+ "google.generativeai.protos.Document.pb": true,
+ "google.generativeai.protos.Document.serialize": true,
+ "google.generativeai.protos.Document.to_dict": true,
+ "google.generativeai.protos.Document.to_json": true,
+ "google.generativeai.protos.Document.update_time": true,
+ "google.generativeai.protos.Document.wrap": true,
+ "google.generativeai.protos.DynamicRetrievalConfig": false,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode": false,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_DYNAMIC": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.MODE_UNSPECIFIED": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__abs__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__add__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__and__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__bool__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__eq__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__floordiv__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ge__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__gt__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__init__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__invert__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__le__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lshift__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__lt__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mod__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__mul__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ne__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__neg__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__new__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__or__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pos__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__pow__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__radd__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rand__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rfloordiv__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rlshift__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmod__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rmul__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__ror__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rpow__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rrshift__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rshift__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rsub__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rtruediv__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__rxor__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__sub__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__truediv__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__xor__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.as_integer_ratio": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_count": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.bit_length": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.conjugate": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.denominator": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.from_bytes": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.imag": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.is_integer": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.numerator": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.real": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.to_bytes": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__call__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__eq__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__ge__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__gt__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__init__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__le__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__lt__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__ne__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__new__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__or__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.__ror__": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.copy_from": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.deserialize": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.dynamic_threshold": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.from_json": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.mode": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.mro": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.pb": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.serialize": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.to_dict": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.to_json": true,
+ "google.generativeai.protos.DynamicRetrievalConfig.wrap": true,
+ "google.generativeai.protos.EmbedContentRequest": false,
+ "google.generativeai.protos.EmbedContentRequest.__call__": true,
+ "google.generativeai.protos.EmbedContentRequest.__eq__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ge__": true,
+ "google.generativeai.protos.EmbedContentRequest.__gt__": true,
+ "google.generativeai.protos.EmbedContentRequest.__init__": true,
+ "google.generativeai.protos.EmbedContentRequest.__le__": true,
+ "google.generativeai.protos.EmbedContentRequest.__lt__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ne__": true,
+ "google.generativeai.protos.EmbedContentRequest.__new__": true,
+ "google.generativeai.protos.EmbedContentRequest.__or__": true,
+ "google.generativeai.protos.EmbedContentRequest.__ror__": true,
+ "google.generativeai.protos.EmbedContentRequest.content": true,
+ "google.generativeai.protos.EmbedContentRequest.copy_from": true,
+ "google.generativeai.protos.EmbedContentRequest.deserialize": true,
+ "google.generativeai.protos.EmbedContentRequest.from_json": true,
+ "google.generativeai.protos.EmbedContentRequest.model": true,
+ "google.generativeai.protos.EmbedContentRequest.mro": true,
+ "google.generativeai.protos.EmbedContentRequest.output_dimensionality": true,
+ "google.generativeai.protos.EmbedContentRequest.pb": true,
+ "google.generativeai.protos.EmbedContentRequest.serialize": true,
+ "google.generativeai.protos.EmbedContentRequest.task_type": true,
+ "google.generativeai.protos.EmbedContentRequest.title": true,
+ "google.generativeai.protos.EmbedContentRequest.to_dict": true,
+ "google.generativeai.protos.EmbedContentRequest.to_json": true,
+ "google.generativeai.protos.EmbedContentRequest.wrap": true,
+ "google.generativeai.protos.EmbedContentResponse": false,
+ "google.generativeai.protos.EmbedContentResponse.__call__": true,
+ "google.generativeai.protos.EmbedContentResponse.__eq__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ge__": true,
+ "google.generativeai.protos.EmbedContentResponse.__gt__": true,
+ "google.generativeai.protos.EmbedContentResponse.__init__": true,
+ "google.generativeai.protos.EmbedContentResponse.__le__": true,
+ "google.generativeai.protos.EmbedContentResponse.__lt__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ne__": true,
+ "google.generativeai.protos.EmbedContentResponse.__new__": true,
+ "google.generativeai.protos.EmbedContentResponse.__or__": true,
+ "google.generativeai.protos.EmbedContentResponse.__ror__": true,
+ "google.generativeai.protos.EmbedContentResponse.copy_from": true,
+ "google.generativeai.protos.EmbedContentResponse.deserialize": true,
+ "google.generativeai.protos.EmbedContentResponse.embedding": true,
+ "google.generativeai.protos.EmbedContentResponse.from_json": true,
+ "google.generativeai.protos.EmbedContentResponse.mro": true,
+ "google.generativeai.protos.EmbedContentResponse.pb": true,
+ "google.generativeai.protos.EmbedContentResponse.serialize": true,
+ "google.generativeai.protos.EmbedContentResponse.to_dict": true,
+ "google.generativeai.protos.EmbedContentResponse.to_json": true,
+ "google.generativeai.protos.EmbedContentResponse.wrap": true,
+ "google.generativeai.protos.EmbedTextRequest": false,
+ "google.generativeai.protos.EmbedTextRequest.__call__": true,
+ "google.generativeai.protos.EmbedTextRequest.__eq__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ge__": true,
+ "google.generativeai.protos.EmbedTextRequest.__gt__": true,
+ "google.generativeai.protos.EmbedTextRequest.__init__": true,
+ "google.generativeai.protos.EmbedTextRequest.__le__": true,
+ "google.generativeai.protos.EmbedTextRequest.__lt__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ne__": true,
+ "google.generativeai.protos.EmbedTextRequest.__new__": true,
+ "google.generativeai.protos.EmbedTextRequest.__or__": true,
+ "google.generativeai.protos.EmbedTextRequest.__ror__": true,
+ "google.generativeai.protos.EmbedTextRequest.copy_from": true,
+ "google.generativeai.protos.EmbedTextRequest.deserialize": true,
+ "google.generativeai.protos.EmbedTextRequest.from_json": true,
+ "google.generativeai.protos.EmbedTextRequest.model": true,
+ "google.generativeai.protos.EmbedTextRequest.mro": true,
+ "google.generativeai.protos.EmbedTextRequest.pb": true,
+ "google.generativeai.protos.EmbedTextRequest.serialize": true,
+ "google.generativeai.protos.EmbedTextRequest.text": true,
+ "google.generativeai.protos.EmbedTextRequest.to_dict": true,
+ "google.generativeai.protos.EmbedTextRequest.to_json": true,
+ "google.generativeai.protos.EmbedTextRequest.wrap": true,
+ "google.generativeai.protos.EmbedTextResponse": false,
+ "google.generativeai.protos.EmbedTextResponse.__call__": true,
+ "google.generativeai.protos.EmbedTextResponse.__eq__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ge__": true,
+ "google.generativeai.protos.EmbedTextResponse.__gt__": true,
+ "google.generativeai.protos.EmbedTextResponse.__init__": true,
+ "google.generativeai.protos.EmbedTextResponse.__le__": true,
+ "google.generativeai.protos.EmbedTextResponse.__lt__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ne__": true,
+ "google.generativeai.protos.EmbedTextResponse.__new__": true,
+ "google.generativeai.protos.EmbedTextResponse.__or__": true,
+ "google.generativeai.protos.EmbedTextResponse.__ror__": true,
+ "google.generativeai.protos.EmbedTextResponse.copy_from": true,
+ "google.generativeai.protos.EmbedTextResponse.deserialize": true,
+ "google.generativeai.protos.EmbedTextResponse.embedding": true,
+ "google.generativeai.protos.EmbedTextResponse.from_json": true,
+ "google.generativeai.protos.EmbedTextResponse.mro": true,
+ "google.generativeai.protos.EmbedTextResponse.pb": true,
+ "google.generativeai.protos.EmbedTextResponse.serialize": true,
+ "google.generativeai.protos.EmbedTextResponse.to_dict": true,
+ "google.generativeai.protos.EmbedTextResponse.to_json": true,
+ "google.generativeai.protos.EmbedTextResponse.wrap": true,
+ "google.generativeai.protos.Embedding": false,
+ "google.generativeai.protos.Embedding.__call__": true,
+ "google.generativeai.protos.Embedding.__eq__": true,
+ "google.generativeai.protos.Embedding.__ge__": true,
+ "google.generativeai.protos.Embedding.__gt__": true,
+ "google.generativeai.protos.Embedding.__init__": true,
+ "google.generativeai.protos.Embedding.__le__": true,
+ "google.generativeai.protos.Embedding.__lt__": true,
+ "google.generativeai.protos.Embedding.__ne__": true,
+ "google.generativeai.protos.Embedding.__new__": true,
+ "google.generativeai.protos.Embedding.__or__": true,
+ "google.generativeai.protos.Embedding.__ror__": true,
+ "google.generativeai.protos.Embedding.copy_from": true,
+ "google.generativeai.protos.Embedding.deserialize": true,
+ "google.generativeai.protos.Embedding.from_json": true,
+ "google.generativeai.protos.Embedding.mro": true,
+ "google.generativeai.protos.Embedding.pb": true,
+ "google.generativeai.protos.Embedding.serialize": true,
+ "google.generativeai.protos.Embedding.to_dict": true,
+ "google.generativeai.protos.Embedding.to_json": true,
+ "google.generativeai.protos.Embedding.value": true,
+ "google.generativeai.protos.Embedding.wrap": true,
+ "google.generativeai.protos.Example": false,
+ "google.generativeai.protos.Example.__call__": true,
+ "google.generativeai.protos.Example.__eq__": true,
+ "google.generativeai.protos.Example.__ge__": true,
+ "google.generativeai.protos.Example.__gt__": true,
+ "google.generativeai.protos.Example.__init__": true,
+ "google.generativeai.protos.Example.__le__": true,
+ "google.generativeai.protos.Example.__lt__": true,
+ "google.generativeai.protos.Example.__ne__": true,
+ "google.generativeai.protos.Example.__new__": true,
+ "google.generativeai.protos.Example.__or__": true,
+ "google.generativeai.protos.Example.__ror__": true,
+ "google.generativeai.protos.Example.copy_from": true,
+ "google.generativeai.protos.Example.deserialize": true,
+ "google.generativeai.protos.Example.from_json": true,
+ "google.generativeai.protos.Example.input": true,
+ "google.generativeai.protos.Example.mro": true,
+ "google.generativeai.protos.Example.output": true,
+ "google.generativeai.protos.Example.pb": true,
+ "google.generativeai.protos.Example.serialize": true,
+ "google.generativeai.protos.Example.to_dict": true,
+ "google.generativeai.protos.Example.to_json": true,
+ "google.generativeai.protos.Example.wrap": true,
+ "google.generativeai.protos.ExecutableCode": false,
+ "google.generativeai.protos.ExecutableCode.Language": false,
+ "google.generativeai.protos.ExecutableCode.Language.LANGUAGE_UNSPECIFIED": true,
+ "google.generativeai.protos.ExecutableCode.Language.PYTHON": true,
+ "google.generativeai.protos.ExecutableCode.Language.__abs__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__add__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__and__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__bool__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__contains__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__eq__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__floordiv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ge__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__getitem__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__gt__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__init__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__invert__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__iter__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__le__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__len__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__lshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__lt__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__mod__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__mul__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ne__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__neg__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__new__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__or__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__pos__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__pow__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__radd__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rand__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rfloordiv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rlshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rmod__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rmul__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__ror__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rpow__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rrshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rshift__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rsub__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rtruediv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__rxor__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__sub__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__truediv__": true,
+ "google.generativeai.protos.ExecutableCode.Language.__xor__": true,
+ "google.generativeai.protos.ExecutableCode.Language.as_integer_ratio": true,
+ "google.generativeai.protos.ExecutableCode.Language.bit_count": true,
+ "google.generativeai.protos.ExecutableCode.Language.bit_length": true,
+ "google.generativeai.protos.ExecutableCode.Language.conjugate": true,
+ "google.generativeai.protos.ExecutableCode.Language.denominator": true,
+ "google.generativeai.protos.ExecutableCode.Language.from_bytes": true,
+ "google.generativeai.protos.ExecutableCode.Language.imag": true,
+ "google.generativeai.protos.ExecutableCode.Language.is_integer": true,
+ "google.generativeai.protos.ExecutableCode.Language.numerator": true,
+ "google.generativeai.protos.ExecutableCode.Language.real": true,
+ "google.generativeai.protos.ExecutableCode.Language.to_bytes": true,
+ "google.generativeai.protos.ExecutableCode.__call__": true,
+ "google.generativeai.protos.ExecutableCode.__eq__": true,
+ "google.generativeai.protos.ExecutableCode.__ge__": true,
+ "google.generativeai.protos.ExecutableCode.__gt__": true,
+ "google.generativeai.protos.ExecutableCode.__init__": true,
+ "google.generativeai.protos.ExecutableCode.__le__": true,
+ "google.generativeai.protos.ExecutableCode.__lt__": true,
+ "google.generativeai.protos.ExecutableCode.__ne__": true,
+ "google.generativeai.protos.ExecutableCode.__new__": true,
+ "google.generativeai.protos.ExecutableCode.__or__": true,
+ "google.generativeai.protos.ExecutableCode.__ror__": true,
+ "google.generativeai.protos.ExecutableCode.code": true,
+ "google.generativeai.protos.ExecutableCode.copy_from": true,
+ "google.generativeai.protos.ExecutableCode.deserialize": true,
+ "google.generativeai.protos.ExecutableCode.from_json": true,
+ "google.generativeai.protos.ExecutableCode.language": true,
+ "google.generativeai.protos.ExecutableCode.mro": true,
+ "google.generativeai.protos.ExecutableCode.pb": true,
+ "google.generativeai.protos.ExecutableCode.serialize": true,
+ "google.generativeai.protos.ExecutableCode.to_dict": true,
+ "google.generativeai.protos.ExecutableCode.to_json": true,
+ "google.generativeai.protos.ExecutableCode.wrap": true,
+ "google.generativeai.protos.File": false,
+ "google.generativeai.protos.File.State": false,
+ "google.generativeai.protos.File.State.ACTIVE": true,
+ "google.generativeai.protos.File.State.FAILED": true,
+ "google.generativeai.protos.File.State.PROCESSING": true,
+ "google.generativeai.protos.File.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.File.State.__abs__": true,
+ "google.generativeai.protos.File.State.__add__": true,
+ "google.generativeai.protos.File.State.__and__": true,
+ "google.generativeai.protos.File.State.__bool__": true,
+ "google.generativeai.protos.File.State.__contains__": true,
+ "google.generativeai.protos.File.State.__eq__": true,
+ "google.generativeai.protos.File.State.__floordiv__": true,
+ "google.generativeai.protos.File.State.__ge__": true,
+ "google.generativeai.protos.File.State.__getitem__": true,
+ "google.generativeai.protos.File.State.__gt__": true,
+ "google.generativeai.protos.File.State.__init__": true,
+ "google.generativeai.protos.File.State.__invert__": true,
+ "google.generativeai.protos.File.State.__iter__": true,
+ "google.generativeai.protos.File.State.__le__": true,
+ "google.generativeai.protos.File.State.__len__": true,
+ "google.generativeai.protos.File.State.__lshift__": true,
+ "google.generativeai.protos.File.State.__lt__": true,
+ "google.generativeai.protos.File.State.__mod__": true,
+ "google.generativeai.protos.File.State.__mul__": true,
+ "google.generativeai.protos.File.State.__ne__": true,
+ "google.generativeai.protos.File.State.__neg__": true,
+ "google.generativeai.protos.File.State.__new__": true,
+ "google.generativeai.protos.File.State.__or__": true,
+ "google.generativeai.protos.File.State.__pos__": true,
+ "google.generativeai.protos.File.State.__pow__": true,
+ "google.generativeai.protos.File.State.__radd__": true,
+ "google.generativeai.protos.File.State.__rand__": true,
+ "google.generativeai.protos.File.State.__rfloordiv__": true,
+ "google.generativeai.protos.File.State.__rlshift__": true,
+ "google.generativeai.protos.File.State.__rmod__": true,
+ "google.generativeai.protos.File.State.__rmul__": true,
+ "google.generativeai.protos.File.State.__ror__": true,
+ "google.generativeai.protos.File.State.__rpow__": true,
+ "google.generativeai.protos.File.State.__rrshift__": true,
+ "google.generativeai.protos.File.State.__rshift__": true,
+ "google.generativeai.protos.File.State.__rsub__": true,
+ "google.generativeai.protos.File.State.__rtruediv__": true,
+ "google.generativeai.protos.File.State.__rxor__": true,
+ "google.generativeai.protos.File.State.__sub__": true,
+ "google.generativeai.protos.File.State.__truediv__": true,
+ "google.generativeai.protos.File.State.__xor__": true,
+ "google.generativeai.protos.File.State.as_integer_ratio": true,
+ "google.generativeai.protos.File.State.bit_count": true,
+ "google.generativeai.protos.File.State.bit_length": true,
+ "google.generativeai.protos.File.State.conjugate": true,
+ "google.generativeai.protos.File.State.denominator": true,
+ "google.generativeai.protos.File.State.from_bytes": true,
+ "google.generativeai.protos.File.State.imag": true,
+ "google.generativeai.protos.File.State.is_integer": true,
+ "google.generativeai.protos.File.State.numerator": true,
+ "google.generativeai.protos.File.State.real": true,
+ "google.generativeai.protos.File.State.to_bytes": true,
+ "google.generativeai.protos.File.__call__": true,
+ "google.generativeai.protos.File.__eq__": true,
+ "google.generativeai.protos.File.__ge__": true,
+ "google.generativeai.protos.File.__gt__": true,
+ "google.generativeai.protos.File.__init__": true,
+ "google.generativeai.protos.File.__le__": true,
+ "google.generativeai.protos.File.__lt__": true,
+ "google.generativeai.protos.File.__ne__": true,
+ "google.generativeai.protos.File.__new__": true,
+ "google.generativeai.protos.File.__or__": true,
+ "google.generativeai.protos.File.__ror__": true,
+ "google.generativeai.protos.File.copy_from": true,
+ "google.generativeai.protos.File.create_time": true,
+ "google.generativeai.protos.File.deserialize": true,
+ "google.generativeai.protos.File.display_name": true,
+ "google.generativeai.protos.File.error": true,
+ "google.generativeai.protos.File.expiration_time": true,
+ "google.generativeai.protos.File.from_json": true,
+ "google.generativeai.protos.File.mime_type": true,
+ "google.generativeai.protos.File.mro": true,
+ "google.generativeai.protos.File.name": true,
+ "google.generativeai.protos.File.pb": true,
+ "google.generativeai.protos.File.serialize": true,
+ "google.generativeai.protos.File.sha256_hash": true,
+ "google.generativeai.protos.File.size_bytes": true,
+ "google.generativeai.protos.File.state": true,
+ "google.generativeai.protos.File.to_dict": true,
+ "google.generativeai.protos.File.to_json": true,
+ "google.generativeai.protos.File.update_time": true,
+ "google.generativeai.protos.File.uri": true,
+ "google.generativeai.protos.File.video_metadata": true,
+ "google.generativeai.protos.File.wrap": true,
+ "google.generativeai.protos.FileData": false,
+ "google.generativeai.protos.FileData.__call__": true,
+ "google.generativeai.protos.FileData.__eq__": true,
+ "google.generativeai.protos.FileData.__ge__": true,
+ "google.generativeai.protos.FileData.__gt__": true,
+ "google.generativeai.protos.FileData.__init__": true,
+ "google.generativeai.protos.FileData.__le__": true,
+ "google.generativeai.protos.FileData.__lt__": true,
+ "google.generativeai.protos.FileData.__ne__": true,
+ "google.generativeai.protos.FileData.__new__": true,
+ "google.generativeai.protos.FileData.__or__": true,
+ "google.generativeai.protos.FileData.__ror__": true,
+ "google.generativeai.protos.FileData.copy_from": true,
+ "google.generativeai.protos.FileData.deserialize": true,
+ "google.generativeai.protos.FileData.file_uri": true,
+ "google.generativeai.protos.FileData.from_json": true,
+ "google.generativeai.protos.FileData.mime_type": true,
+ "google.generativeai.protos.FileData.mro": true,
+ "google.generativeai.protos.FileData.pb": true,
+ "google.generativeai.protos.FileData.serialize": true,
+ "google.generativeai.protos.FileData.to_dict": true,
+ "google.generativeai.protos.FileData.to_json": true,
+ "google.generativeai.protos.FileData.wrap": true,
+ "google.generativeai.protos.FunctionCall": false,
+ "google.generativeai.protos.FunctionCall.__call__": true,
+ "google.generativeai.protos.FunctionCall.__eq__": true,
+ "google.generativeai.protos.FunctionCall.__ge__": true,
+ "google.generativeai.protos.FunctionCall.__gt__": true,
+ "google.generativeai.protos.FunctionCall.__init__": true,
+ "google.generativeai.protos.FunctionCall.__le__": true,
+ "google.generativeai.protos.FunctionCall.__lt__": true,
+ "google.generativeai.protos.FunctionCall.__ne__": true,
+ "google.generativeai.protos.FunctionCall.__new__": true,
+ "google.generativeai.protos.FunctionCall.__or__": true,
+ "google.generativeai.protos.FunctionCall.__ror__": true,
+ "google.generativeai.protos.FunctionCall.args": true,
+ "google.generativeai.protos.FunctionCall.copy_from": true,
+ "google.generativeai.protos.FunctionCall.deserialize": true,
+ "google.generativeai.protos.FunctionCall.from_json": true,
+ "google.generativeai.protos.FunctionCall.mro": true,
+ "google.generativeai.protos.FunctionCall.name": true,
+ "google.generativeai.protos.FunctionCall.pb": true,
+ "google.generativeai.protos.FunctionCall.serialize": true,
+ "google.generativeai.protos.FunctionCall.to_dict": true,
+ "google.generativeai.protos.FunctionCall.to_json": true,
+ "google.generativeai.protos.FunctionCall.wrap": true,
+ "google.generativeai.protos.FunctionCallingConfig": false,
+ "google.generativeai.protos.FunctionCallingConfig.Mode": false,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.ANY": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.AUTO": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.MODE_UNSPECIFIED": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.NONE": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__abs__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__add__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__and__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__bool__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__eq__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__floordiv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ge__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__gt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__init__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__invert__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__le__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__lt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mod__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__mul__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ne__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__neg__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__new__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__or__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pos__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__pow__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__radd__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rand__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rfloordiv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rlshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmod__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rmul__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__ror__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rpow__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rrshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rshift__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rsub__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rtruediv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__rxor__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__sub__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__truediv__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__xor__": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.as_integer_ratio": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_count": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.bit_length": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.conjugate": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.denominator": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.from_bytes": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.imag": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.is_integer": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.numerator": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.real": true,
+ "google.generativeai.protos.FunctionCallingConfig.Mode.to_bytes": true,
+ "google.generativeai.protos.FunctionCallingConfig.__call__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__eq__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ge__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__gt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__init__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__le__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__lt__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ne__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__new__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__or__": true,
+ "google.generativeai.protos.FunctionCallingConfig.__ror__": true,
+ "google.generativeai.protos.FunctionCallingConfig.allowed_function_names": true,
+ "google.generativeai.protos.FunctionCallingConfig.copy_from": true,
+ "google.generativeai.protos.FunctionCallingConfig.deserialize": true,
+ "google.generativeai.protos.FunctionCallingConfig.from_json": true,
+ "google.generativeai.protos.FunctionCallingConfig.mode": true,
+ "google.generativeai.protos.FunctionCallingConfig.mro": true,
+ "google.generativeai.protos.FunctionCallingConfig.pb": true,
+ "google.generativeai.protos.FunctionCallingConfig.serialize": true,
+ "google.generativeai.protos.FunctionCallingConfig.to_dict": true,
+ "google.generativeai.protos.FunctionCallingConfig.to_json": true,
+ "google.generativeai.protos.FunctionCallingConfig.wrap": true,
+ "google.generativeai.protos.FunctionDeclaration": false,
+ "google.generativeai.protos.FunctionDeclaration.__call__": true,
+ "google.generativeai.protos.FunctionDeclaration.__eq__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ge__": true,
+ "google.generativeai.protos.FunctionDeclaration.__gt__": true,
+ "google.generativeai.protos.FunctionDeclaration.__init__": true,
+ "google.generativeai.protos.FunctionDeclaration.__le__": true,
+ "google.generativeai.protos.FunctionDeclaration.__lt__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ne__": true,
+ "google.generativeai.protos.FunctionDeclaration.__new__": true,
+ "google.generativeai.protos.FunctionDeclaration.__or__": true,
+ "google.generativeai.protos.FunctionDeclaration.__ror__": true,
+ "google.generativeai.protos.FunctionDeclaration.copy_from": true,
+ "google.generativeai.protos.FunctionDeclaration.description": true,
+ "google.generativeai.protos.FunctionDeclaration.deserialize": true,
+ "google.generativeai.protos.FunctionDeclaration.from_json": true,
+ "google.generativeai.protos.FunctionDeclaration.mro": true,
+ "google.generativeai.protos.FunctionDeclaration.name": true,
+ "google.generativeai.protos.FunctionDeclaration.parameters": true,
+ "google.generativeai.protos.FunctionDeclaration.pb": true,
+ "google.generativeai.protos.FunctionDeclaration.serialize": true,
+ "google.generativeai.protos.FunctionDeclaration.to_dict": true,
+ "google.generativeai.protos.FunctionDeclaration.to_json": true,
+ "google.generativeai.protos.FunctionDeclaration.wrap": true,
+ "google.generativeai.protos.FunctionResponse": false,
+ "google.generativeai.protos.FunctionResponse.__call__": true,
+ "google.generativeai.protos.FunctionResponse.__eq__": true,
+ "google.generativeai.protos.FunctionResponse.__ge__": true,
+ "google.generativeai.protos.FunctionResponse.__gt__": true,
+ "google.generativeai.protos.FunctionResponse.__init__": true,
+ "google.generativeai.protos.FunctionResponse.__le__": true,
+ "google.generativeai.protos.FunctionResponse.__lt__": true,
+ "google.generativeai.protos.FunctionResponse.__ne__": true,
+ "google.generativeai.protos.FunctionResponse.__new__": true,
+ "google.generativeai.protos.FunctionResponse.__or__": true,
+ "google.generativeai.protos.FunctionResponse.__ror__": true,
+ "google.generativeai.protos.FunctionResponse.copy_from": true,
+ "google.generativeai.protos.FunctionResponse.deserialize": true,
+ "google.generativeai.protos.FunctionResponse.from_json": true,
+ "google.generativeai.protos.FunctionResponse.mro": true,
+ "google.generativeai.protos.FunctionResponse.name": true,
+ "google.generativeai.protos.FunctionResponse.pb": true,
+ "google.generativeai.protos.FunctionResponse.response": true,
+ "google.generativeai.protos.FunctionResponse.serialize": true,
+ "google.generativeai.protos.FunctionResponse.to_dict": true,
+ "google.generativeai.protos.FunctionResponse.to_json": true,
+ "google.generativeai.protos.FunctionResponse.wrap": true,
+ "google.generativeai.protos.GenerateAnswerRequest": false,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": false,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.ANSWER_STYLE_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.VERBOSE": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__abs__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__add__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__and__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__bool__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__floordiv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__init__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__invert__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__le__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mod__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__mul__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__neg__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__new__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__or__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pos__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__pow__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__radd__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rand__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rlshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmod__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rmul__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rpow__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rrshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rshift__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rsub__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rtruediv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__rxor__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__sub__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__truediv__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__xor__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_count": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.bit_length": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.conjugate": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.denominator": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.from_bytes": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.imag": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.is_integer": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.numerator": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.real": true,
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.to_bytes": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__call__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__init__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__le__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__new__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__or__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerRequest.answer_style": true,
+ "google.generativeai.protos.GenerateAnswerRequest.contents": true,
+ "google.generativeai.protos.GenerateAnswerRequest.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerRequest.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerRequest.from_json": true,
+ "google.generativeai.protos.GenerateAnswerRequest.inline_passages": true,
+ "google.generativeai.protos.GenerateAnswerRequest.model": true,
+ "google.generativeai.protos.GenerateAnswerRequest.mro": true,
+ "google.generativeai.protos.GenerateAnswerRequest.pb": true,
+ "google.generativeai.protos.GenerateAnswerRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateAnswerRequest.semantic_retriever": true,
+ "google.generativeai.protos.GenerateAnswerRequest.serialize": true,
+ "google.generativeai.protos.GenerateAnswerRequest.temperature": true,
+ "google.generativeai.protos.GenerateAnswerRequest.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerRequest.to_json": true,
+ "google.generativeai.protos.GenerateAnswerRequest.wrap": true,
+ "google.generativeai.protos.GenerateAnswerResponse": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": false,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.OTHER": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.SAFETY": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__abs__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__add__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__and__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__bool__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__floordiv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__invert__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mod__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__mul__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__neg__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pos__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__pow__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__radd__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rand__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rlshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmod__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rmul__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rpow__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rrshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rshift__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rsub__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rtruediv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__rxor__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__sub__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__truediv__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__xor__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_count": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.bit_length": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.conjugate": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.denominator": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.from_bytes": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.imag": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.is_integer": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.numerator": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.real": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.to_bytes": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__call__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.block_reason": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.mro": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.safety_ratings": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__call__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__eq__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ge__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__gt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__init__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__le__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__lt__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ne__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__new__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__or__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.__ror__": true,
+ "google.generativeai.protos.GenerateAnswerResponse.answer": true,
+ "google.generativeai.protos.GenerateAnswerResponse.answerable_probability": true,
+ "google.generativeai.protos.GenerateAnswerResponse.copy_from": true,
+ "google.generativeai.protos.GenerateAnswerResponse.deserialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.from_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.input_feedback": true,
+ "google.generativeai.protos.GenerateAnswerResponse.mro": true,
+ "google.generativeai.protos.GenerateAnswerResponse.pb": true,
+ "google.generativeai.protos.GenerateAnswerResponse.serialize": true,
+ "google.generativeai.protos.GenerateAnswerResponse.to_dict": true,
+ "google.generativeai.protos.GenerateAnswerResponse.to_json": true,
+ "google.generativeai.protos.GenerateAnswerResponse.wrap": true,
+ "google.generativeai.protos.GenerateContentRequest": false,
+ "google.generativeai.protos.GenerateContentRequest.__call__": true,
+ "google.generativeai.protos.GenerateContentRequest.__eq__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ge__": true,
+ "google.generativeai.protos.GenerateContentRequest.__gt__": true,
+ "google.generativeai.protos.GenerateContentRequest.__init__": true,
+ "google.generativeai.protos.GenerateContentRequest.__le__": true,
+ "google.generativeai.protos.GenerateContentRequest.__lt__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ne__": true,
+ "google.generativeai.protos.GenerateContentRequest.__new__": true,
+ "google.generativeai.protos.GenerateContentRequest.__or__": true,
+ "google.generativeai.protos.GenerateContentRequest.__ror__": true,
+ "google.generativeai.protos.GenerateContentRequest.cached_content": true,
+ "google.generativeai.protos.GenerateContentRequest.contents": true,
+ "google.generativeai.protos.GenerateContentRequest.copy_from": true,
+ "google.generativeai.protos.GenerateContentRequest.deserialize": true,
+ "google.generativeai.protos.GenerateContentRequest.from_json": true,
+ "google.generativeai.protos.GenerateContentRequest.generation_config": true,
+ "google.generativeai.protos.GenerateContentRequest.model": true,
+ "google.generativeai.protos.GenerateContentRequest.mro": true,
+ "google.generativeai.protos.GenerateContentRequest.pb": true,
+ "google.generativeai.protos.GenerateContentRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateContentRequest.serialize": true,
+ "google.generativeai.protos.GenerateContentRequest.system_instruction": true,
+ "google.generativeai.protos.GenerateContentRequest.to_dict": true,
+ "google.generativeai.protos.GenerateContentRequest.to_json": true,
+ "google.generativeai.protos.GenerateContentRequest.tool_config": true,
+ "google.generativeai.protos.GenerateContentRequest.tools": true,
+ "google.generativeai.protos.GenerateContentRequest.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": false,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCKLIST": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.BLOCK_REASON_UNSPECIFIED": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.OTHER": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.PROHIBITED_CONTENT": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.SAFETY": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__abs__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__add__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__and__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__bool__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__floordiv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__invert__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mod__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__mul__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__neg__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pos__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__pow__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__radd__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rand__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rfloordiv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rlshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmod__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rmul__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rpow__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rrshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rshift__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rsub__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rtruediv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__rxor__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__sub__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__truediv__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__xor__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.as_integer_ratio": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_count": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.bit_length": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.conjugate": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.denominator": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.from_bytes": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.imag": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.is_integer": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.numerator": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.real": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.to_bytes": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.block_reason": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.safety_ratings": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata": false,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.cached_content_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.candidates_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.prompt_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.total_token_count": true,
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": true,
+ "google.generativeai.protos.GenerateContentResponse.__call__": true,
+ "google.generativeai.protos.GenerateContentResponse.__eq__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ge__": true,
+ "google.generativeai.protos.GenerateContentResponse.__gt__": true,
+ "google.generativeai.protos.GenerateContentResponse.__init__": true,
+ "google.generativeai.protos.GenerateContentResponse.__le__": true,
+ "google.generativeai.protos.GenerateContentResponse.__lt__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ne__": true,
+ "google.generativeai.protos.GenerateContentResponse.__new__": true,
+ "google.generativeai.protos.GenerateContentResponse.__or__": true,
+ "google.generativeai.protos.GenerateContentResponse.__ror__": true,
+ "google.generativeai.protos.GenerateContentResponse.candidates": true,
+ "google.generativeai.protos.GenerateContentResponse.copy_from": true,
+ "google.generativeai.protos.GenerateContentResponse.deserialize": true,
+ "google.generativeai.protos.GenerateContentResponse.from_json": true,
+ "google.generativeai.protos.GenerateContentResponse.mro": true,
+ "google.generativeai.protos.GenerateContentResponse.pb": true,
+ "google.generativeai.protos.GenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.protos.GenerateContentResponse.serialize": true,
+ "google.generativeai.protos.GenerateContentResponse.to_dict": true,
+ "google.generativeai.protos.GenerateContentResponse.to_json": true,
+ "google.generativeai.protos.GenerateContentResponse.usage_metadata": true,
+ "google.generativeai.protos.GenerateContentResponse.wrap": true,
+ "google.generativeai.protos.GenerateMessageRequest": false,
+ "google.generativeai.protos.GenerateMessageRequest.__call__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__eq__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ge__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__gt__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__init__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__le__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__lt__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ne__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__new__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__or__": true,
+ "google.generativeai.protos.GenerateMessageRequest.__ror__": true,
+ "google.generativeai.protos.GenerateMessageRequest.candidate_count": true,
+ "google.generativeai.protos.GenerateMessageRequest.copy_from": true,
+ "google.generativeai.protos.GenerateMessageRequest.deserialize": true,
+ "google.generativeai.protos.GenerateMessageRequest.from_json": true,
+ "google.generativeai.protos.GenerateMessageRequest.model": true,
+ "google.generativeai.protos.GenerateMessageRequest.mro": true,
+ "google.generativeai.protos.GenerateMessageRequest.pb": true,
+ "google.generativeai.protos.GenerateMessageRequest.prompt": true,
+ "google.generativeai.protos.GenerateMessageRequest.serialize": true,
+ "google.generativeai.protos.GenerateMessageRequest.temperature": true,
+ "google.generativeai.protos.GenerateMessageRequest.to_dict": true,
+ "google.generativeai.protos.GenerateMessageRequest.to_json": true,
+ "google.generativeai.protos.GenerateMessageRequest.top_k": true,
+ "google.generativeai.protos.GenerateMessageRequest.top_p": true,
+ "google.generativeai.protos.GenerateMessageRequest.wrap": true,
+ "google.generativeai.protos.GenerateMessageResponse": false,
+ "google.generativeai.protos.GenerateMessageResponse.__call__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__eq__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ge__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__gt__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__init__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__le__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__lt__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ne__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__new__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__or__": true,
+ "google.generativeai.protos.GenerateMessageResponse.__ror__": true,
+ "google.generativeai.protos.GenerateMessageResponse.candidates": true,
+ "google.generativeai.protos.GenerateMessageResponse.copy_from": true,
+ "google.generativeai.protos.GenerateMessageResponse.deserialize": true,
+ "google.generativeai.protos.GenerateMessageResponse.filters": true,
+ "google.generativeai.protos.GenerateMessageResponse.from_json": true,
+ "google.generativeai.protos.GenerateMessageResponse.messages": true,
+ "google.generativeai.protos.GenerateMessageResponse.mro": true,
+ "google.generativeai.protos.GenerateMessageResponse.pb": true,
+ "google.generativeai.protos.GenerateMessageResponse.serialize": true,
+ "google.generativeai.protos.GenerateMessageResponse.to_dict": true,
+ "google.generativeai.protos.GenerateMessageResponse.to_json": true,
+ "google.generativeai.protos.GenerateMessageResponse.wrap": true,
+ "google.generativeai.protos.GenerateTextRequest": false,
+ "google.generativeai.protos.GenerateTextRequest.__call__": true,
+ "google.generativeai.protos.GenerateTextRequest.__eq__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ge__": true,
+ "google.generativeai.protos.GenerateTextRequest.__gt__": true,
+ "google.generativeai.protos.GenerateTextRequest.__init__": true,
+ "google.generativeai.protos.GenerateTextRequest.__le__": true,
+ "google.generativeai.protos.GenerateTextRequest.__lt__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ne__": true,
+ "google.generativeai.protos.GenerateTextRequest.__new__": true,
+ "google.generativeai.protos.GenerateTextRequest.__or__": true,
+ "google.generativeai.protos.GenerateTextRequest.__ror__": true,
+ "google.generativeai.protos.GenerateTextRequest.candidate_count": true,
+ "google.generativeai.protos.GenerateTextRequest.copy_from": true,
+ "google.generativeai.protos.GenerateTextRequest.deserialize": true,
+ "google.generativeai.protos.GenerateTextRequest.from_json": true,
+ "google.generativeai.protos.GenerateTextRequest.max_output_tokens": true,
+ "google.generativeai.protos.GenerateTextRequest.model": true,
+ "google.generativeai.protos.GenerateTextRequest.mro": true,
+ "google.generativeai.protos.GenerateTextRequest.pb": true,
+ "google.generativeai.protos.GenerateTextRequest.prompt": true,
+ "google.generativeai.protos.GenerateTextRequest.safety_settings": true,
+ "google.generativeai.protos.GenerateTextRequest.serialize": true,
+ "google.generativeai.protos.GenerateTextRequest.stop_sequences": true,
+ "google.generativeai.protos.GenerateTextRequest.temperature": true,
+ "google.generativeai.protos.GenerateTextRequest.to_dict": true,
+ "google.generativeai.protos.GenerateTextRequest.to_json": true,
+ "google.generativeai.protos.GenerateTextRequest.top_k": true,
+ "google.generativeai.protos.GenerateTextRequest.top_p": true,
+ "google.generativeai.protos.GenerateTextRequest.wrap": true,
+ "google.generativeai.protos.GenerateTextResponse": false,
+ "google.generativeai.protos.GenerateTextResponse.__call__": true,
+ "google.generativeai.protos.GenerateTextResponse.__eq__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ge__": true,
+ "google.generativeai.protos.GenerateTextResponse.__gt__": true,
+ "google.generativeai.protos.GenerateTextResponse.__init__": true,
+ "google.generativeai.protos.GenerateTextResponse.__le__": true,
+ "google.generativeai.protos.GenerateTextResponse.__lt__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ne__": true,
+ "google.generativeai.protos.GenerateTextResponse.__new__": true,
+ "google.generativeai.protos.GenerateTextResponse.__or__": true,
+ "google.generativeai.protos.GenerateTextResponse.__ror__": true,
+ "google.generativeai.protos.GenerateTextResponse.candidates": true,
+ "google.generativeai.protos.GenerateTextResponse.copy_from": true,
+ "google.generativeai.protos.GenerateTextResponse.deserialize": true,
+ "google.generativeai.protos.GenerateTextResponse.filters": true,
+ "google.generativeai.protos.GenerateTextResponse.from_json": true,
+ "google.generativeai.protos.GenerateTextResponse.mro": true,
+ "google.generativeai.protos.GenerateTextResponse.pb": true,
+ "google.generativeai.protos.GenerateTextResponse.safety_feedback": true,
+ "google.generativeai.protos.GenerateTextResponse.serialize": true,
+ "google.generativeai.protos.GenerateTextResponse.to_dict": true,
+ "google.generativeai.protos.GenerateTextResponse.to_json": true,
+ "google.generativeai.protos.GenerateTextResponse.wrap": true,
+ "google.generativeai.protos.GenerationConfig": false,
+ "google.generativeai.protos.GenerationConfig.__call__": true,
+ "google.generativeai.protos.GenerationConfig.__eq__": true,
+ "google.generativeai.protos.GenerationConfig.__ge__": true,
+ "google.generativeai.protos.GenerationConfig.__gt__": true,
+ "google.generativeai.protos.GenerationConfig.__init__": true,
+ "google.generativeai.protos.GenerationConfig.__le__": true,
+ "google.generativeai.protos.GenerationConfig.__lt__": true,
+ "google.generativeai.protos.GenerationConfig.__ne__": true,
+ "google.generativeai.protos.GenerationConfig.__new__": true,
+ "google.generativeai.protos.GenerationConfig.__or__": true,
+ "google.generativeai.protos.GenerationConfig.__ror__": true,
+ "google.generativeai.protos.GenerationConfig.candidate_count": true,
+ "google.generativeai.protos.GenerationConfig.copy_from": true,
+ "google.generativeai.protos.GenerationConfig.deserialize": true,
+ "google.generativeai.protos.GenerationConfig.frequency_penalty": true,
+ "google.generativeai.protos.GenerationConfig.from_json": true,
+ "google.generativeai.protos.GenerationConfig.logprobs": true,
+ "google.generativeai.protos.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.protos.GenerationConfig.mro": true,
+ "google.generativeai.protos.GenerationConfig.pb": true,
+ "google.generativeai.protos.GenerationConfig.presence_penalty": true,
+ "google.generativeai.protos.GenerationConfig.response_logprobs": true,
+ "google.generativeai.protos.GenerationConfig.response_mime_type": true,
+ "google.generativeai.protos.GenerationConfig.response_schema": true,
+ "google.generativeai.protos.GenerationConfig.serialize": true,
+ "google.generativeai.protos.GenerationConfig.stop_sequences": true,
+ "google.generativeai.protos.GenerationConfig.temperature": true,
+ "google.generativeai.protos.GenerationConfig.to_dict": true,
+ "google.generativeai.protos.GenerationConfig.to_json": true,
+ "google.generativeai.protos.GenerationConfig.top_k": true,
+ "google.generativeai.protos.GenerationConfig.top_p": true,
+ "google.generativeai.protos.GenerationConfig.wrap": true,
+ "google.generativeai.protos.GetCachedContentRequest": false,
+ "google.generativeai.protos.GetCachedContentRequest.__call__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__init__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__le__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__new__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__or__": true,
+ "google.generativeai.protos.GetCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.GetCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.GetCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.GetCachedContentRequest.from_json": true,
+ "google.generativeai.protos.GetCachedContentRequest.mro": true,
+ "google.generativeai.protos.GetCachedContentRequest.name": true,
+ "google.generativeai.protos.GetCachedContentRequest.pb": true,
+ "google.generativeai.protos.GetCachedContentRequest.serialize": true,
+ "google.generativeai.protos.GetCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.GetCachedContentRequest.to_json": true,
+ "google.generativeai.protos.GetCachedContentRequest.wrap": true,
+ "google.generativeai.protos.GetChunkRequest": false,
+ "google.generativeai.protos.GetChunkRequest.__call__": true,
+ "google.generativeai.protos.GetChunkRequest.__eq__": true,
+ "google.generativeai.protos.GetChunkRequest.__ge__": true,
+ "google.generativeai.protos.GetChunkRequest.__gt__": true,
+ "google.generativeai.protos.GetChunkRequest.__init__": true,
+ "google.generativeai.protos.GetChunkRequest.__le__": true,
+ "google.generativeai.protos.GetChunkRequest.__lt__": true,
+ "google.generativeai.protos.GetChunkRequest.__ne__": true,
+ "google.generativeai.protos.GetChunkRequest.__new__": true,
+ "google.generativeai.protos.GetChunkRequest.__or__": true,
+ "google.generativeai.protos.GetChunkRequest.__ror__": true,
+ "google.generativeai.protos.GetChunkRequest.copy_from": true,
+ "google.generativeai.protos.GetChunkRequest.deserialize": true,
+ "google.generativeai.protos.GetChunkRequest.from_json": true,
+ "google.generativeai.protos.GetChunkRequest.mro": true,
+ "google.generativeai.protos.GetChunkRequest.name": true,
+ "google.generativeai.protos.GetChunkRequest.pb": true,
+ "google.generativeai.protos.GetChunkRequest.serialize": true,
+ "google.generativeai.protos.GetChunkRequest.to_dict": true,
+ "google.generativeai.protos.GetChunkRequest.to_json": true,
+ "google.generativeai.protos.GetChunkRequest.wrap": true,
+ "google.generativeai.protos.GetCorpusRequest": false,
+ "google.generativeai.protos.GetCorpusRequest.__call__": true,
+ "google.generativeai.protos.GetCorpusRequest.__eq__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ge__": true,
+ "google.generativeai.protos.GetCorpusRequest.__gt__": true,
+ "google.generativeai.protos.GetCorpusRequest.__init__": true,
+ "google.generativeai.protos.GetCorpusRequest.__le__": true,
+ "google.generativeai.protos.GetCorpusRequest.__lt__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ne__": true,
+ "google.generativeai.protos.GetCorpusRequest.__new__": true,
+ "google.generativeai.protos.GetCorpusRequest.__or__": true,
+ "google.generativeai.protos.GetCorpusRequest.__ror__": true,
+ "google.generativeai.protos.GetCorpusRequest.copy_from": true,
+ "google.generativeai.protos.GetCorpusRequest.deserialize": true,
+ "google.generativeai.protos.GetCorpusRequest.from_json": true,
+ "google.generativeai.protos.GetCorpusRequest.mro": true,
+ "google.generativeai.protos.GetCorpusRequest.name": true,
+ "google.generativeai.protos.GetCorpusRequest.pb": true,
+ "google.generativeai.protos.GetCorpusRequest.serialize": true,
+ "google.generativeai.protos.GetCorpusRequest.to_dict": true,
+ "google.generativeai.protos.GetCorpusRequest.to_json": true,
+ "google.generativeai.protos.GetCorpusRequest.wrap": true,
+ "google.generativeai.protos.GetDocumentRequest": false,
+ "google.generativeai.protos.GetDocumentRequest.__call__": true,
+ "google.generativeai.protos.GetDocumentRequest.__eq__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ge__": true,
+ "google.generativeai.protos.GetDocumentRequest.__gt__": true,
+ "google.generativeai.protos.GetDocumentRequest.__init__": true,
+ "google.generativeai.protos.GetDocumentRequest.__le__": true,
+ "google.generativeai.protos.GetDocumentRequest.__lt__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ne__": true,
+ "google.generativeai.protos.GetDocumentRequest.__new__": true,
+ "google.generativeai.protos.GetDocumentRequest.__or__": true,
+ "google.generativeai.protos.GetDocumentRequest.__ror__": true,
+ "google.generativeai.protos.GetDocumentRequest.copy_from": true,
+ "google.generativeai.protos.GetDocumentRequest.deserialize": true,
+ "google.generativeai.protos.GetDocumentRequest.from_json": true,
+ "google.generativeai.protos.GetDocumentRequest.mro": true,
+ "google.generativeai.protos.GetDocumentRequest.name": true,
+ "google.generativeai.protos.GetDocumentRequest.pb": true,
+ "google.generativeai.protos.GetDocumentRequest.serialize": true,
+ "google.generativeai.protos.GetDocumentRequest.to_dict": true,
+ "google.generativeai.protos.GetDocumentRequest.to_json": true,
+ "google.generativeai.protos.GetDocumentRequest.wrap": true,
+ "google.generativeai.protos.GetFileRequest": false,
+ "google.generativeai.protos.GetFileRequest.__call__": true,
+ "google.generativeai.protos.GetFileRequest.__eq__": true,
+ "google.generativeai.protos.GetFileRequest.__ge__": true,
+ "google.generativeai.protos.GetFileRequest.__gt__": true,
+ "google.generativeai.protos.GetFileRequest.__init__": true,
+ "google.generativeai.protos.GetFileRequest.__le__": true,
+ "google.generativeai.protos.GetFileRequest.__lt__": true,
+ "google.generativeai.protos.GetFileRequest.__ne__": true,
+ "google.generativeai.protos.GetFileRequest.__new__": true,
+ "google.generativeai.protos.GetFileRequest.__or__": true,
+ "google.generativeai.protos.GetFileRequest.__ror__": true,
+ "google.generativeai.protos.GetFileRequest.copy_from": true,
+ "google.generativeai.protos.GetFileRequest.deserialize": true,
+ "google.generativeai.protos.GetFileRequest.from_json": true,
+ "google.generativeai.protos.GetFileRequest.mro": true,
+ "google.generativeai.protos.GetFileRequest.name": true,
+ "google.generativeai.protos.GetFileRequest.pb": true,
+ "google.generativeai.protos.GetFileRequest.serialize": true,
+ "google.generativeai.protos.GetFileRequest.to_dict": true,
+ "google.generativeai.protos.GetFileRequest.to_json": true,
+ "google.generativeai.protos.GetFileRequest.wrap": true,
+ "google.generativeai.protos.GetModelRequest": false,
+ "google.generativeai.protos.GetModelRequest.__call__": true,
+ "google.generativeai.protos.GetModelRequest.__eq__": true,
+ "google.generativeai.protos.GetModelRequest.__ge__": true,
+ "google.generativeai.protos.GetModelRequest.__gt__": true,
+ "google.generativeai.protos.GetModelRequest.__init__": true,
+ "google.generativeai.protos.GetModelRequest.__le__": true,
+ "google.generativeai.protos.GetModelRequest.__lt__": true,
+ "google.generativeai.protos.GetModelRequest.__ne__": true,
+ "google.generativeai.protos.GetModelRequest.__new__": true,
+ "google.generativeai.protos.GetModelRequest.__or__": true,
+ "google.generativeai.protos.GetModelRequest.__ror__": true,
+ "google.generativeai.protos.GetModelRequest.copy_from": true,
+ "google.generativeai.protos.GetModelRequest.deserialize": true,
+ "google.generativeai.protos.GetModelRequest.from_json": true,
+ "google.generativeai.protos.GetModelRequest.mro": true,
+ "google.generativeai.protos.GetModelRequest.name": true,
+ "google.generativeai.protos.GetModelRequest.pb": true,
+ "google.generativeai.protos.GetModelRequest.serialize": true,
+ "google.generativeai.protos.GetModelRequest.to_dict": true,
+ "google.generativeai.protos.GetModelRequest.to_json": true,
+ "google.generativeai.protos.GetModelRequest.wrap": true,
+ "google.generativeai.protos.GetPermissionRequest": false,
+ "google.generativeai.protos.GetPermissionRequest.__call__": true,
+ "google.generativeai.protos.GetPermissionRequest.__eq__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ge__": true,
+ "google.generativeai.protos.GetPermissionRequest.__gt__": true,
+ "google.generativeai.protos.GetPermissionRequest.__init__": true,
+ "google.generativeai.protos.GetPermissionRequest.__le__": true,
+ "google.generativeai.protos.GetPermissionRequest.__lt__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ne__": true,
+ "google.generativeai.protos.GetPermissionRequest.__new__": true,
+ "google.generativeai.protos.GetPermissionRequest.__or__": true,
+ "google.generativeai.protos.GetPermissionRequest.__ror__": true,
+ "google.generativeai.protos.GetPermissionRequest.copy_from": true,
+ "google.generativeai.protos.GetPermissionRequest.deserialize": true,
+ "google.generativeai.protos.GetPermissionRequest.from_json": true,
+ "google.generativeai.protos.GetPermissionRequest.mro": true,
+ "google.generativeai.protos.GetPermissionRequest.name": true,
+ "google.generativeai.protos.GetPermissionRequest.pb": true,
+ "google.generativeai.protos.GetPermissionRequest.serialize": true,
+ "google.generativeai.protos.GetPermissionRequest.to_dict": true,
+ "google.generativeai.protos.GetPermissionRequest.to_json": true,
+ "google.generativeai.protos.GetPermissionRequest.wrap": true,
+ "google.generativeai.protos.GetTunedModelRequest": false,
+ "google.generativeai.protos.GetTunedModelRequest.__call__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__init__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__le__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__new__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__or__": true,
+ "google.generativeai.protos.GetTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.GetTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.GetTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.GetTunedModelRequest.from_json": true,
+ "google.generativeai.protos.GetTunedModelRequest.mro": true,
+ "google.generativeai.protos.GetTunedModelRequest.name": true,
+ "google.generativeai.protos.GetTunedModelRequest.pb": true,
+ "google.generativeai.protos.GetTunedModelRequest.serialize": true,
+ "google.generativeai.protos.GetTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.GetTunedModelRequest.to_json": true,
+ "google.generativeai.protos.GetTunedModelRequest.wrap": true,
+ "google.generativeai.protos.GoogleSearchRetrieval": false,
+ "google.generativeai.protos.GoogleSearchRetrieval.__call__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__eq__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__ge__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__gt__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__init__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__le__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__lt__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__ne__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__new__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__or__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.__ror__": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.copy_from": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.deserialize": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.dynamic_retrieval_config": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.from_json": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.mro": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.pb": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.serialize": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.to_dict": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.to_json": true,
+ "google.generativeai.protos.GoogleSearchRetrieval.wrap": true,
+ "google.generativeai.protos.GroundingAttribution": false,
+ "google.generativeai.protos.GroundingAttribution.__call__": true,
+ "google.generativeai.protos.GroundingAttribution.__eq__": true,
+ "google.generativeai.protos.GroundingAttribution.__ge__": true,
+ "google.generativeai.protos.GroundingAttribution.__gt__": true,
+ "google.generativeai.protos.GroundingAttribution.__init__": true,
+ "google.generativeai.protos.GroundingAttribution.__le__": true,
+ "google.generativeai.protos.GroundingAttribution.__lt__": true,
+ "google.generativeai.protos.GroundingAttribution.__ne__": true,
+ "google.generativeai.protos.GroundingAttribution.__new__": true,
+ "google.generativeai.protos.GroundingAttribution.__or__": true,
+ "google.generativeai.protos.GroundingAttribution.__ror__": true,
+ "google.generativeai.protos.GroundingAttribution.content": true,
+ "google.generativeai.protos.GroundingAttribution.copy_from": true,
+ "google.generativeai.protos.GroundingAttribution.deserialize": true,
+ "google.generativeai.protos.GroundingAttribution.from_json": true,
+ "google.generativeai.protos.GroundingAttribution.mro": true,
+ "google.generativeai.protos.GroundingAttribution.pb": true,
+ "google.generativeai.protos.GroundingAttribution.serialize": true,
+ "google.generativeai.protos.GroundingAttribution.source_id": true,
+ "google.generativeai.protos.GroundingAttribution.to_dict": true,
+ "google.generativeai.protos.GroundingAttribution.to_json": true,
+ "google.generativeai.protos.GroundingAttribution.wrap": true,
+ "google.generativeai.protos.GroundingChunk": false,
+ "google.generativeai.protos.GroundingChunk.Web": false,
+ "google.generativeai.protos.GroundingChunk.Web.__call__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__eq__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__ge__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__gt__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__init__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__le__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__lt__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__ne__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__new__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__or__": true,
+ "google.generativeai.protos.GroundingChunk.Web.__ror__": true,
+ "google.generativeai.protos.GroundingChunk.Web.copy_from": true,
+ "google.generativeai.protos.GroundingChunk.Web.deserialize": true,
+ "google.generativeai.protos.GroundingChunk.Web.from_json": true,
+ "google.generativeai.protos.GroundingChunk.Web.mro": true,
+ "google.generativeai.protos.GroundingChunk.Web.pb": true,
+ "google.generativeai.protos.GroundingChunk.Web.serialize": true,
+ "google.generativeai.protos.GroundingChunk.Web.title": true,
+ "google.generativeai.protos.GroundingChunk.Web.to_dict": true,
+ "google.generativeai.protos.GroundingChunk.Web.to_json": true,
+ "google.generativeai.protos.GroundingChunk.Web.uri": true,
+ "google.generativeai.protos.GroundingChunk.Web.wrap": true,
+ "google.generativeai.protos.GroundingChunk.__call__": true,
+ "google.generativeai.protos.GroundingChunk.__eq__": true,
+ "google.generativeai.protos.GroundingChunk.__ge__": true,
+ "google.generativeai.protos.GroundingChunk.__gt__": true,
+ "google.generativeai.protos.GroundingChunk.__init__": true,
+ "google.generativeai.protos.GroundingChunk.__le__": true,
+ "google.generativeai.protos.GroundingChunk.__lt__": true,
+ "google.generativeai.protos.GroundingChunk.__ne__": true,
+ "google.generativeai.protos.GroundingChunk.__new__": true,
+ "google.generativeai.protos.GroundingChunk.__or__": true,
+ "google.generativeai.protos.GroundingChunk.__ror__": true,
+ "google.generativeai.protos.GroundingChunk.copy_from": true,
+ "google.generativeai.protos.GroundingChunk.deserialize": true,
+ "google.generativeai.protos.GroundingChunk.from_json": true,
+ "google.generativeai.protos.GroundingChunk.mro": true,
+ "google.generativeai.protos.GroundingChunk.pb": true,
+ "google.generativeai.protos.GroundingChunk.serialize": true,
+ "google.generativeai.protos.GroundingChunk.to_dict": true,
+ "google.generativeai.protos.GroundingChunk.to_json": true,
+ "google.generativeai.protos.GroundingChunk.web": true,
+ "google.generativeai.protos.GroundingChunk.wrap": true,
+ "google.generativeai.protos.GroundingMetadata": false,
+ "google.generativeai.protos.GroundingMetadata.__call__": true,
+ "google.generativeai.protos.GroundingMetadata.__eq__": true,
+ "google.generativeai.protos.GroundingMetadata.__ge__": true,
+ "google.generativeai.protos.GroundingMetadata.__gt__": true,
+ "google.generativeai.protos.GroundingMetadata.__init__": true,
+ "google.generativeai.protos.GroundingMetadata.__le__": true,
+ "google.generativeai.protos.GroundingMetadata.__lt__": true,
+ "google.generativeai.protos.GroundingMetadata.__ne__": true,
+ "google.generativeai.protos.GroundingMetadata.__new__": true,
+ "google.generativeai.protos.GroundingMetadata.__or__": true,
+ "google.generativeai.protos.GroundingMetadata.__ror__": true,
+ "google.generativeai.protos.GroundingMetadata.copy_from": true,
+ "google.generativeai.protos.GroundingMetadata.deserialize": true,
+ "google.generativeai.protos.GroundingMetadata.from_json": true,
+ "google.generativeai.protos.GroundingMetadata.grounding_chunks": true,
+ "google.generativeai.protos.GroundingMetadata.grounding_supports": true,
+ "google.generativeai.protos.GroundingMetadata.mro": true,
+ "google.generativeai.protos.GroundingMetadata.pb": true,
+ "google.generativeai.protos.GroundingMetadata.retrieval_metadata": true,
+ "google.generativeai.protos.GroundingMetadata.search_entry_point": true,
+ "google.generativeai.protos.GroundingMetadata.serialize": true,
+ "google.generativeai.protos.GroundingMetadata.to_dict": true,
+ "google.generativeai.protos.GroundingMetadata.to_json": true,
+ "google.generativeai.protos.GroundingMetadata.wrap": true,
+ "google.generativeai.protos.GroundingPassage": false,
+ "google.generativeai.protos.GroundingPassage.__call__": true,
+ "google.generativeai.protos.GroundingPassage.__eq__": true,
+ "google.generativeai.protos.GroundingPassage.__ge__": true,
+ "google.generativeai.protos.GroundingPassage.__gt__": true,
+ "google.generativeai.protos.GroundingPassage.__init__": true,
+ "google.generativeai.protos.GroundingPassage.__le__": true,
+ "google.generativeai.protos.GroundingPassage.__lt__": true,
+ "google.generativeai.protos.GroundingPassage.__ne__": true,
+ "google.generativeai.protos.GroundingPassage.__new__": true,
+ "google.generativeai.protos.GroundingPassage.__or__": true,
+ "google.generativeai.protos.GroundingPassage.__ror__": true,
+ "google.generativeai.protos.GroundingPassage.content": true,
+ "google.generativeai.protos.GroundingPassage.copy_from": true,
+ "google.generativeai.protos.GroundingPassage.deserialize": true,
+ "google.generativeai.protos.GroundingPassage.from_json": true,
+ "google.generativeai.protos.GroundingPassage.id": true,
+ "google.generativeai.protos.GroundingPassage.mro": true,
+ "google.generativeai.protos.GroundingPassage.pb": true,
+ "google.generativeai.protos.GroundingPassage.serialize": true,
+ "google.generativeai.protos.GroundingPassage.to_dict": true,
+ "google.generativeai.protos.GroundingPassage.to_json": true,
+ "google.generativeai.protos.GroundingPassage.wrap": true,
+ "google.generativeai.protos.GroundingPassages": false,
+ "google.generativeai.protos.GroundingPassages.__call__": true,
+ "google.generativeai.protos.GroundingPassages.__eq__": true,
+ "google.generativeai.protos.GroundingPassages.__ge__": true,
+ "google.generativeai.protos.GroundingPassages.__gt__": true,
+ "google.generativeai.protos.GroundingPassages.__init__": true,
+ "google.generativeai.protos.GroundingPassages.__le__": true,
+ "google.generativeai.protos.GroundingPassages.__lt__": true,
+ "google.generativeai.protos.GroundingPassages.__ne__": true,
+ "google.generativeai.protos.GroundingPassages.__new__": true,
+ "google.generativeai.protos.GroundingPassages.__or__": true,
+ "google.generativeai.protos.GroundingPassages.__ror__": true,
+ "google.generativeai.protos.GroundingPassages.copy_from": true,
+ "google.generativeai.protos.GroundingPassages.deserialize": true,
+ "google.generativeai.protos.GroundingPassages.from_json": true,
+ "google.generativeai.protos.GroundingPassages.mro": true,
+ "google.generativeai.protos.GroundingPassages.passages": true,
+ "google.generativeai.protos.GroundingPassages.pb": true,
+ "google.generativeai.protos.GroundingPassages.serialize": true,
+ "google.generativeai.protos.GroundingPassages.to_dict": true,
+ "google.generativeai.protos.GroundingPassages.to_json": true,
+ "google.generativeai.protos.GroundingPassages.wrap": true,
+ "google.generativeai.protos.GroundingSupport": false,
+ "google.generativeai.protos.GroundingSupport.__call__": true,
+ "google.generativeai.protos.GroundingSupport.__eq__": true,
+ "google.generativeai.protos.GroundingSupport.__ge__": true,
+ "google.generativeai.protos.GroundingSupport.__gt__": true,
+ "google.generativeai.protos.GroundingSupport.__init__": true,
+ "google.generativeai.protos.GroundingSupport.__le__": true,
+ "google.generativeai.protos.GroundingSupport.__lt__": true,
+ "google.generativeai.protos.GroundingSupport.__ne__": true,
+ "google.generativeai.protos.GroundingSupport.__new__": true,
+ "google.generativeai.protos.GroundingSupport.__or__": true,
+ "google.generativeai.protos.GroundingSupport.__ror__": true,
+ "google.generativeai.protos.GroundingSupport.confidence_scores": true,
+ "google.generativeai.protos.GroundingSupport.copy_from": true,
+ "google.generativeai.protos.GroundingSupport.deserialize": true,
+ "google.generativeai.protos.GroundingSupport.from_json": true,
+ "google.generativeai.protos.GroundingSupport.grounding_chunk_indices": true,
+ "google.generativeai.protos.GroundingSupport.mro": true,
+ "google.generativeai.protos.GroundingSupport.pb": true,
+ "google.generativeai.protos.GroundingSupport.segment": true,
+ "google.generativeai.protos.GroundingSupport.serialize": true,
+ "google.generativeai.protos.GroundingSupport.to_dict": true,
+ "google.generativeai.protos.GroundingSupport.to_json": true,
+ "google.generativeai.protos.GroundingSupport.wrap": true,
+ "google.generativeai.protos.HarmCategory": false,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_DEROGATORY": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_MEDICAL": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUAL": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_TOXICITY": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
+ "google.generativeai.protos.HarmCategory.HARM_CATEGORY_VIOLENCE": true,
+ "google.generativeai.protos.HarmCategory.__abs__": true,
+ "google.generativeai.protos.HarmCategory.__add__": true,
+ "google.generativeai.protos.HarmCategory.__and__": true,
+ "google.generativeai.protos.HarmCategory.__bool__": true,
+ "google.generativeai.protos.HarmCategory.__contains__": true,
+ "google.generativeai.protos.HarmCategory.__eq__": true,
+ "google.generativeai.protos.HarmCategory.__floordiv__": true,
+ "google.generativeai.protos.HarmCategory.__ge__": true,
+ "google.generativeai.protos.HarmCategory.__getitem__": true,
+ "google.generativeai.protos.HarmCategory.__gt__": true,
+ "google.generativeai.protos.HarmCategory.__init__": true,
+ "google.generativeai.protos.HarmCategory.__invert__": true,
+ "google.generativeai.protos.HarmCategory.__iter__": true,
+ "google.generativeai.protos.HarmCategory.__le__": true,
+ "google.generativeai.protos.HarmCategory.__len__": true,
+ "google.generativeai.protos.HarmCategory.__lshift__": true,
+ "google.generativeai.protos.HarmCategory.__lt__": true,
+ "google.generativeai.protos.HarmCategory.__mod__": true,
+ "google.generativeai.protos.HarmCategory.__mul__": true,
+ "google.generativeai.protos.HarmCategory.__ne__": true,
+ "google.generativeai.protos.HarmCategory.__neg__": true,
+ "google.generativeai.protos.HarmCategory.__new__": true,
+ "google.generativeai.protos.HarmCategory.__or__": true,
+ "google.generativeai.protos.HarmCategory.__pos__": true,
+ "google.generativeai.protos.HarmCategory.__pow__": true,
+ "google.generativeai.protos.HarmCategory.__radd__": true,
+ "google.generativeai.protos.HarmCategory.__rand__": true,
+ "google.generativeai.protos.HarmCategory.__rfloordiv__": true,
+ "google.generativeai.protos.HarmCategory.__rlshift__": true,
+ "google.generativeai.protos.HarmCategory.__rmod__": true,
+ "google.generativeai.protos.HarmCategory.__rmul__": true,
+ "google.generativeai.protos.HarmCategory.__ror__": true,
+ "google.generativeai.protos.HarmCategory.__rpow__": true,
+ "google.generativeai.protos.HarmCategory.__rrshift__": true,
+ "google.generativeai.protos.HarmCategory.__rshift__": true,
+ "google.generativeai.protos.HarmCategory.__rsub__": true,
+ "google.generativeai.protos.HarmCategory.__rtruediv__": true,
+ "google.generativeai.protos.HarmCategory.__rxor__": true,
+ "google.generativeai.protos.HarmCategory.__sub__": true,
+ "google.generativeai.protos.HarmCategory.__truediv__": true,
+ "google.generativeai.protos.HarmCategory.__xor__": true,
+ "google.generativeai.protos.HarmCategory.as_integer_ratio": true,
+ "google.generativeai.protos.HarmCategory.bit_count": true,
+ "google.generativeai.protos.HarmCategory.bit_length": true,
+ "google.generativeai.protos.HarmCategory.conjugate": true,
+ "google.generativeai.protos.HarmCategory.denominator": true,
+ "google.generativeai.protos.HarmCategory.from_bytes": true,
+ "google.generativeai.protos.HarmCategory.imag": true,
+ "google.generativeai.protos.HarmCategory.is_integer": true,
+ "google.generativeai.protos.HarmCategory.numerator": true,
+ "google.generativeai.protos.HarmCategory.real": true,
+ "google.generativeai.protos.HarmCategory.to_bytes": true,
+ "google.generativeai.protos.Hyperparameters": false,
+ "google.generativeai.protos.Hyperparameters.__call__": true,
+ "google.generativeai.protos.Hyperparameters.__eq__": true,
+ "google.generativeai.protos.Hyperparameters.__ge__": true,
+ "google.generativeai.protos.Hyperparameters.__gt__": true,
+ "google.generativeai.protos.Hyperparameters.__init__": true,
+ "google.generativeai.protos.Hyperparameters.__le__": true,
+ "google.generativeai.protos.Hyperparameters.__lt__": true,
+ "google.generativeai.protos.Hyperparameters.__ne__": true,
+ "google.generativeai.protos.Hyperparameters.__new__": true,
+ "google.generativeai.protos.Hyperparameters.__or__": true,
+ "google.generativeai.protos.Hyperparameters.__ror__": true,
+ "google.generativeai.protos.Hyperparameters.batch_size": true,
+ "google.generativeai.protos.Hyperparameters.copy_from": true,
+ "google.generativeai.protos.Hyperparameters.deserialize": true,
+ "google.generativeai.protos.Hyperparameters.epoch_count": true,
+ "google.generativeai.protos.Hyperparameters.from_json": true,
+ "google.generativeai.protos.Hyperparameters.learning_rate": true,
+ "google.generativeai.protos.Hyperparameters.learning_rate_multiplier": true,
+ "google.generativeai.protos.Hyperparameters.mro": true,
+ "google.generativeai.protos.Hyperparameters.pb": true,
+ "google.generativeai.protos.Hyperparameters.serialize": true,
+ "google.generativeai.protos.Hyperparameters.to_dict": true,
+ "google.generativeai.protos.Hyperparameters.to_json": true,
+ "google.generativeai.protos.Hyperparameters.wrap": true,
+ "google.generativeai.protos.ListCachedContentsRequest": false,
+ "google.generativeai.protos.ListCachedContentsRequest.__call__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__eq__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ge__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__gt__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__init__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__le__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__lt__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ne__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__new__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__or__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.__ror__": true,
+ "google.generativeai.protos.ListCachedContentsRequest.copy_from": true,
+ "google.generativeai.protos.ListCachedContentsRequest.deserialize": true,
+ "google.generativeai.protos.ListCachedContentsRequest.from_json": true,
+ "google.generativeai.protos.ListCachedContentsRequest.mro": true,
+ "google.generativeai.protos.ListCachedContentsRequest.page_size": true,
+ "google.generativeai.protos.ListCachedContentsRequest.page_token": true,
+ "google.generativeai.protos.ListCachedContentsRequest.pb": true,
+ "google.generativeai.protos.ListCachedContentsRequest.serialize": true,
+ "google.generativeai.protos.ListCachedContentsRequest.to_dict": true,
+ "google.generativeai.protos.ListCachedContentsRequest.to_json": true,
+ "google.generativeai.protos.ListCachedContentsRequest.wrap": true,
+ "google.generativeai.protos.ListCachedContentsResponse": false,
+ "google.generativeai.protos.ListCachedContentsResponse.__call__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__eq__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ge__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__gt__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__init__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__le__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__lt__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ne__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__new__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__or__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.__ror__": true,
+ "google.generativeai.protos.ListCachedContentsResponse.cached_contents": true,
+ "google.generativeai.protos.ListCachedContentsResponse.copy_from": true,
+ "google.generativeai.protos.ListCachedContentsResponse.deserialize": true,
+ "google.generativeai.protos.ListCachedContentsResponse.from_json": true,
+ "google.generativeai.protos.ListCachedContentsResponse.mro": true,
+ "google.generativeai.protos.ListCachedContentsResponse.next_page_token": true,
+ "google.generativeai.protos.ListCachedContentsResponse.pb": true,
+ "google.generativeai.protos.ListCachedContentsResponse.serialize": true,
+ "google.generativeai.protos.ListCachedContentsResponse.to_dict": true,
+ "google.generativeai.protos.ListCachedContentsResponse.to_json": true,
+ "google.generativeai.protos.ListCachedContentsResponse.wrap": true,
+ "google.generativeai.protos.ListChunksRequest": false,
+ "google.generativeai.protos.ListChunksRequest.__call__": true,
+ "google.generativeai.protos.ListChunksRequest.__eq__": true,
+ "google.generativeai.protos.ListChunksRequest.__ge__": true,
+ "google.generativeai.protos.ListChunksRequest.__gt__": true,
+ "google.generativeai.protos.ListChunksRequest.__init__": true,
+ "google.generativeai.protos.ListChunksRequest.__le__": true,
+ "google.generativeai.protos.ListChunksRequest.__lt__": true,
+ "google.generativeai.protos.ListChunksRequest.__ne__": true,
+ "google.generativeai.protos.ListChunksRequest.__new__": true,
+ "google.generativeai.protos.ListChunksRequest.__or__": true,
+ "google.generativeai.protos.ListChunksRequest.__ror__": true,
+ "google.generativeai.protos.ListChunksRequest.copy_from": true,
+ "google.generativeai.protos.ListChunksRequest.deserialize": true,
+ "google.generativeai.protos.ListChunksRequest.from_json": true,
+ "google.generativeai.protos.ListChunksRequest.mro": true,
+ "google.generativeai.protos.ListChunksRequest.page_size": true,
+ "google.generativeai.protos.ListChunksRequest.page_token": true,
+ "google.generativeai.protos.ListChunksRequest.parent": true,
+ "google.generativeai.protos.ListChunksRequest.pb": true,
+ "google.generativeai.protos.ListChunksRequest.serialize": true,
+ "google.generativeai.protos.ListChunksRequest.to_dict": true,
+ "google.generativeai.protos.ListChunksRequest.to_json": true,
+ "google.generativeai.protos.ListChunksRequest.wrap": true,
+ "google.generativeai.protos.ListChunksResponse": false,
+ "google.generativeai.protos.ListChunksResponse.__call__": true,
+ "google.generativeai.protos.ListChunksResponse.__eq__": true,
+ "google.generativeai.protos.ListChunksResponse.__ge__": true,
+ "google.generativeai.protos.ListChunksResponse.__gt__": true,
+ "google.generativeai.protos.ListChunksResponse.__init__": true,
+ "google.generativeai.protos.ListChunksResponse.__le__": true,
+ "google.generativeai.protos.ListChunksResponse.__lt__": true,
+ "google.generativeai.protos.ListChunksResponse.__ne__": true,
+ "google.generativeai.protos.ListChunksResponse.__new__": true,
+ "google.generativeai.protos.ListChunksResponse.__or__": true,
+ "google.generativeai.protos.ListChunksResponse.__ror__": true,
+ "google.generativeai.protos.ListChunksResponse.chunks": true,
+ "google.generativeai.protos.ListChunksResponse.copy_from": true,
+ "google.generativeai.protos.ListChunksResponse.deserialize": true,
+ "google.generativeai.protos.ListChunksResponse.from_json": true,
+ "google.generativeai.protos.ListChunksResponse.mro": true,
+ "google.generativeai.protos.ListChunksResponse.next_page_token": true,
+ "google.generativeai.protos.ListChunksResponse.pb": true,
+ "google.generativeai.protos.ListChunksResponse.serialize": true,
+ "google.generativeai.protos.ListChunksResponse.to_dict": true,
+ "google.generativeai.protos.ListChunksResponse.to_json": true,
+ "google.generativeai.protos.ListChunksResponse.wrap": true,
+ "google.generativeai.protos.ListCorporaRequest": false,
+ "google.generativeai.protos.ListCorporaRequest.__call__": true,
+ "google.generativeai.protos.ListCorporaRequest.__eq__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ge__": true,
+ "google.generativeai.protos.ListCorporaRequest.__gt__": true,
+ "google.generativeai.protos.ListCorporaRequest.__init__": true,
+ "google.generativeai.protos.ListCorporaRequest.__le__": true,
+ "google.generativeai.protos.ListCorporaRequest.__lt__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ne__": true,
+ "google.generativeai.protos.ListCorporaRequest.__new__": true,
+ "google.generativeai.protos.ListCorporaRequest.__or__": true,
+ "google.generativeai.protos.ListCorporaRequest.__ror__": true,
+ "google.generativeai.protos.ListCorporaRequest.copy_from": true,
+ "google.generativeai.protos.ListCorporaRequest.deserialize": true,
+ "google.generativeai.protos.ListCorporaRequest.from_json": true,
+ "google.generativeai.protos.ListCorporaRequest.mro": true,
+ "google.generativeai.protos.ListCorporaRequest.page_size": true,
+ "google.generativeai.protos.ListCorporaRequest.page_token": true,
+ "google.generativeai.protos.ListCorporaRequest.pb": true,
+ "google.generativeai.protos.ListCorporaRequest.serialize": true,
+ "google.generativeai.protos.ListCorporaRequest.to_dict": true,
+ "google.generativeai.protos.ListCorporaRequest.to_json": true,
+ "google.generativeai.protos.ListCorporaRequest.wrap": true,
+ "google.generativeai.protos.ListCorporaResponse": false,
+ "google.generativeai.protos.ListCorporaResponse.__call__": true,
+ "google.generativeai.protos.ListCorporaResponse.__eq__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ge__": true,
+ "google.generativeai.protos.ListCorporaResponse.__gt__": true,
+ "google.generativeai.protos.ListCorporaResponse.__init__": true,
+ "google.generativeai.protos.ListCorporaResponse.__le__": true,
+ "google.generativeai.protos.ListCorporaResponse.__lt__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ne__": true,
+ "google.generativeai.protos.ListCorporaResponse.__new__": true,
+ "google.generativeai.protos.ListCorporaResponse.__or__": true,
+ "google.generativeai.protos.ListCorporaResponse.__ror__": true,
+ "google.generativeai.protos.ListCorporaResponse.copy_from": true,
+ "google.generativeai.protos.ListCorporaResponse.corpora": true,
+ "google.generativeai.protos.ListCorporaResponse.deserialize": true,
+ "google.generativeai.protos.ListCorporaResponse.from_json": true,
+ "google.generativeai.protos.ListCorporaResponse.mro": true,
+ "google.generativeai.protos.ListCorporaResponse.next_page_token": true,
+ "google.generativeai.protos.ListCorporaResponse.pb": true,
+ "google.generativeai.protos.ListCorporaResponse.serialize": true,
+ "google.generativeai.protos.ListCorporaResponse.to_dict": true,
+ "google.generativeai.protos.ListCorporaResponse.to_json": true,
+ "google.generativeai.protos.ListCorporaResponse.wrap": true,
+ "google.generativeai.protos.ListDocumentsRequest": false,
+ "google.generativeai.protos.ListDocumentsRequest.__call__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__eq__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ge__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__gt__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__init__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__le__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__lt__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ne__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__new__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__or__": true,
+ "google.generativeai.protos.ListDocumentsRequest.__ror__": true,
+ "google.generativeai.protos.ListDocumentsRequest.copy_from": true,
+ "google.generativeai.protos.ListDocumentsRequest.deserialize": true,
+ "google.generativeai.protos.ListDocumentsRequest.from_json": true,
+ "google.generativeai.protos.ListDocumentsRequest.mro": true,
+ "google.generativeai.protos.ListDocumentsRequest.page_size": true,
+ "google.generativeai.protos.ListDocumentsRequest.page_token": true,
+ "google.generativeai.protos.ListDocumentsRequest.parent": true,
+ "google.generativeai.protos.ListDocumentsRequest.pb": true,
+ "google.generativeai.protos.ListDocumentsRequest.serialize": true,
+ "google.generativeai.protos.ListDocumentsRequest.to_dict": true,
+ "google.generativeai.protos.ListDocumentsRequest.to_json": true,
+ "google.generativeai.protos.ListDocumentsRequest.wrap": true,
+ "google.generativeai.protos.ListDocumentsResponse": false,
+ "google.generativeai.protos.ListDocumentsResponse.__call__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__eq__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ge__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__gt__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__init__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__le__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__lt__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ne__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__new__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__or__": true,
+ "google.generativeai.protos.ListDocumentsResponse.__ror__": true,
+ "google.generativeai.protos.ListDocumentsResponse.copy_from": true,
+ "google.generativeai.protos.ListDocumentsResponse.deserialize": true,
+ "google.generativeai.protos.ListDocumentsResponse.documents": true,
+ "google.generativeai.protos.ListDocumentsResponse.from_json": true,
+ "google.generativeai.protos.ListDocumentsResponse.mro": true,
+ "google.generativeai.protos.ListDocumentsResponse.next_page_token": true,
+ "google.generativeai.protos.ListDocumentsResponse.pb": true,
+ "google.generativeai.protos.ListDocumentsResponse.serialize": true,
+ "google.generativeai.protos.ListDocumentsResponse.to_dict": true,
+ "google.generativeai.protos.ListDocumentsResponse.to_json": true,
+ "google.generativeai.protos.ListDocumentsResponse.wrap": true,
+ "google.generativeai.protos.ListFilesRequest": false,
+ "google.generativeai.protos.ListFilesRequest.__call__": true,
+ "google.generativeai.protos.ListFilesRequest.__eq__": true,
+ "google.generativeai.protos.ListFilesRequest.__ge__": true,
+ "google.generativeai.protos.ListFilesRequest.__gt__": true,
+ "google.generativeai.protos.ListFilesRequest.__init__": true,
+ "google.generativeai.protos.ListFilesRequest.__le__": true,
+ "google.generativeai.protos.ListFilesRequest.__lt__": true,
+ "google.generativeai.protos.ListFilesRequest.__ne__": true,
+ "google.generativeai.protos.ListFilesRequest.__new__": true,
+ "google.generativeai.protos.ListFilesRequest.__or__": true,
+ "google.generativeai.protos.ListFilesRequest.__ror__": true,
+ "google.generativeai.protos.ListFilesRequest.copy_from": true,
+ "google.generativeai.protos.ListFilesRequest.deserialize": true,
+ "google.generativeai.protos.ListFilesRequest.from_json": true,
+ "google.generativeai.protos.ListFilesRequest.mro": true,
+ "google.generativeai.protos.ListFilesRequest.page_size": true,
+ "google.generativeai.protos.ListFilesRequest.page_token": true,
+ "google.generativeai.protos.ListFilesRequest.pb": true,
+ "google.generativeai.protos.ListFilesRequest.serialize": true,
+ "google.generativeai.protos.ListFilesRequest.to_dict": true,
+ "google.generativeai.protos.ListFilesRequest.to_json": true,
+ "google.generativeai.protos.ListFilesRequest.wrap": true,
+ "google.generativeai.protos.ListFilesResponse": false,
+ "google.generativeai.protos.ListFilesResponse.__call__": true,
+ "google.generativeai.protos.ListFilesResponse.__eq__": true,
+ "google.generativeai.protos.ListFilesResponse.__ge__": true,
+ "google.generativeai.protos.ListFilesResponse.__gt__": true,
+ "google.generativeai.protos.ListFilesResponse.__init__": true,
+ "google.generativeai.protos.ListFilesResponse.__le__": true,
+ "google.generativeai.protos.ListFilesResponse.__lt__": true,
+ "google.generativeai.protos.ListFilesResponse.__ne__": true,
+ "google.generativeai.protos.ListFilesResponse.__new__": true,
+ "google.generativeai.protos.ListFilesResponse.__or__": true,
+ "google.generativeai.protos.ListFilesResponse.__ror__": true,
+ "google.generativeai.protos.ListFilesResponse.copy_from": true,
+ "google.generativeai.protos.ListFilesResponse.deserialize": true,
+ "google.generativeai.protos.ListFilesResponse.files": true,
+ "google.generativeai.protos.ListFilesResponse.from_json": true,
+ "google.generativeai.protos.ListFilesResponse.mro": true,
+ "google.generativeai.protos.ListFilesResponse.next_page_token": true,
+ "google.generativeai.protos.ListFilesResponse.pb": true,
+ "google.generativeai.protos.ListFilesResponse.serialize": true,
+ "google.generativeai.protos.ListFilesResponse.to_dict": true,
+ "google.generativeai.protos.ListFilesResponse.to_json": true,
+ "google.generativeai.protos.ListFilesResponse.wrap": true,
+ "google.generativeai.protos.ListModelsRequest": false,
+ "google.generativeai.protos.ListModelsRequest.__call__": true,
+ "google.generativeai.protos.ListModelsRequest.__eq__": true,
+ "google.generativeai.protos.ListModelsRequest.__ge__": true,
+ "google.generativeai.protos.ListModelsRequest.__gt__": true,
+ "google.generativeai.protos.ListModelsRequest.__init__": true,
+ "google.generativeai.protos.ListModelsRequest.__le__": true,
+ "google.generativeai.protos.ListModelsRequest.__lt__": true,
+ "google.generativeai.protos.ListModelsRequest.__ne__": true,
+ "google.generativeai.protos.ListModelsRequest.__new__": true,
+ "google.generativeai.protos.ListModelsRequest.__or__": true,
+ "google.generativeai.protos.ListModelsRequest.__ror__": true,
+ "google.generativeai.protos.ListModelsRequest.copy_from": true,
+ "google.generativeai.protos.ListModelsRequest.deserialize": true,
+ "google.generativeai.protos.ListModelsRequest.from_json": true,
+ "google.generativeai.protos.ListModelsRequest.mro": true,
+ "google.generativeai.protos.ListModelsRequest.page_size": true,
+ "google.generativeai.protos.ListModelsRequest.page_token": true,
+ "google.generativeai.protos.ListModelsRequest.pb": true,
+ "google.generativeai.protos.ListModelsRequest.serialize": true,
+ "google.generativeai.protos.ListModelsRequest.to_dict": true,
+ "google.generativeai.protos.ListModelsRequest.to_json": true,
+ "google.generativeai.protos.ListModelsRequest.wrap": true,
+ "google.generativeai.protos.ListModelsResponse": false,
+ "google.generativeai.protos.ListModelsResponse.__call__": true,
+ "google.generativeai.protos.ListModelsResponse.__eq__": true,
+ "google.generativeai.protos.ListModelsResponse.__ge__": true,
+ "google.generativeai.protos.ListModelsResponse.__gt__": true,
+ "google.generativeai.protos.ListModelsResponse.__init__": true,
+ "google.generativeai.protos.ListModelsResponse.__le__": true,
+ "google.generativeai.protos.ListModelsResponse.__lt__": true,
+ "google.generativeai.protos.ListModelsResponse.__ne__": true,
+ "google.generativeai.protos.ListModelsResponse.__new__": true,
+ "google.generativeai.protos.ListModelsResponse.__or__": true,
+ "google.generativeai.protos.ListModelsResponse.__ror__": true,
+ "google.generativeai.protos.ListModelsResponse.copy_from": true,
+ "google.generativeai.protos.ListModelsResponse.deserialize": true,
+ "google.generativeai.protos.ListModelsResponse.from_json": true,
+ "google.generativeai.protos.ListModelsResponse.models": true,
+ "google.generativeai.protos.ListModelsResponse.mro": true,
+ "google.generativeai.protos.ListModelsResponse.next_page_token": true,
+ "google.generativeai.protos.ListModelsResponse.pb": true,
+ "google.generativeai.protos.ListModelsResponse.serialize": true,
+ "google.generativeai.protos.ListModelsResponse.to_dict": true,
+ "google.generativeai.protos.ListModelsResponse.to_json": true,
+ "google.generativeai.protos.ListModelsResponse.wrap": true,
+ "google.generativeai.protos.ListPermissionsRequest": false,
+ "google.generativeai.protos.ListPermissionsRequest.__call__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__eq__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ge__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__gt__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__init__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__le__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__lt__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ne__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__new__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__or__": true,
+ "google.generativeai.protos.ListPermissionsRequest.__ror__": true,
+ "google.generativeai.protos.ListPermissionsRequest.copy_from": true,
+ "google.generativeai.protos.ListPermissionsRequest.deserialize": true,
+ "google.generativeai.protos.ListPermissionsRequest.from_json": true,
+ "google.generativeai.protos.ListPermissionsRequest.mro": true,
+ "google.generativeai.protos.ListPermissionsRequest.page_size": true,
+ "google.generativeai.protos.ListPermissionsRequest.page_token": true,
+ "google.generativeai.protos.ListPermissionsRequest.parent": true,
+ "google.generativeai.protos.ListPermissionsRequest.pb": true,
+ "google.generativeai.protos.ListPermissionsRequest.serialize": true,
+ "google.generativeai.protos.ListPermissionsRequest.to_dict": true,
+ "google.generativeai.protos.ListPermissionsRequest.to_json": true,
+ "google.generativeai.protos.ListPermissionsRequest.wrap": true,
+ "google.generativeai.protos.ListPermissionsResponse": false,
+ "google.generativeai.protos.ListPermissionsResponse.__call__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__eq__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ge__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__gt__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__init__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__le__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__lt__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ne__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__new__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__or__": true,
+ "google.generativeai.protos.ListPermissionsResponse.__ror__": true,
+ "google.generativeai.protos.ListPermissionsResponse.copy_from": true,
+ "google.generativeai.protos.ListPermissionsResponse.deserialize": true,
+ "google.generativeai.protos.ListPermissionsResponse.from_json": true,
+ "google.generativeai.protos.ListPermissionsResponse.mro": true,
+ "google.generativeai.protos.ListPermissionsResponse.next_page_token": true,
+ "google.generativeai.protos.ListPermissionsResponse.pb": true,
+ "google.generativeai.protos.ListPermissionsResponse.permissions": true,
+ "google.generativeai.protos.ListPermissionsResponse.serialize": true,
+ "google.generativeai.protos.ListPermissionsResponse.to_dict": true,
+ "google.generativeai.protos.ListPermissionsResponse.to_json": true,
+ "google.generativeai.protos.ListPermissionsResponse.wrap": true,
+ "google.generativeai.protos.ListTunedModelsRequest": false,
+ "google.generativeai.protos.ListTunedModelsRequest.__call__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__eq__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ge__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__gt__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__init__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__le__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__lt__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ne__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__new__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__or__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.__ror__": true,
+ "google.generativeai.protos.ListTunedModelsRequest.copy_from": true,
+ "google.generativeai.protos.ListTunedModelsRequest.deserialize": true,
+ "google.generativeai.protos.ListTunedModelsRequest.filter": true,
+ "google.generativeai.protos.ListTunedModelsRequest.from_json": true,
+ "google.generativeai.protos.ListTunedModelsRequest.mro": true,
+ "google.generativeai.protos.ListTunedModelsRequest.page_size": true,
+ "google.generativeai.protos.ListTunedModelsRequest.page_token": true,
+ "google.generativeai.protos.ListTunedModelsRequest.pb": true,
+ "google.generativeai.protos.ListTunedModelsRequest.serialize": true,
+ "google.generativeai.protos.ListTunedModelsRequest.to_dict": true,
+ "google.generativeai.protos.ListTunedModelsRequest.to_json": true,
+ "google.generativeai.protos.ListTunedModelsRequest.wrap": true,
+ "google.generativeai.protos.ListTunedModelsResponse": false,
+ "google.generativeai.protos.ListTunedModelsResponse.__call__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__eq__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ge__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__gt__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__init__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__le__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__lt__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ne__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__new__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__or__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.__ror__": true,
+ "google.generativeai.protos.ListTunedModelsResponse.copy_from": true,
+ "google.generativeai.protos.ListTunedModelsResponse.deserialize": true,
+ "google.generativeai.protos.ListTunedModelsResponse.from_json": true,
+ "google.generativeai.protos.ListTunedModelsResponse.mro": true,
+ "google.generativeai.protos.ListTunedModelsResponse.next_page_token": true,
+ "google.generativeai.protos.ListTunedModelsResponse.pb": true,
+ "google.generativeai.protos.ListTunedModelsResponse.serialize": true,
+ "google.generativeai.protos.ListTunedModelsResponse.to_dict": true,
+ "google.generativeai.protos.ListTunedModelsResponse.to_json": true,
+ "google.generativeai.protos.ListTunedModelsResponse.tuned_models": true,
+ "google.generativeai.protos.ListTunedModelsResponse.wrap": true,
+ "google.generativeai.protos.LogprobsResult": false,
+ "google.generativeai.protos.LogprobsResult.Candidate": false,
+ "google.generativeai.protos.LogprobsResult.Candidate.__call__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__eq__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__ge__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__gt__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__init__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__le__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__lt__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__ne__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__new__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__or__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.__ror__": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.copy_from": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.deserialize": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.from_json": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.log_probability": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.mro": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.pb": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.serialize": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.to_dict": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.to_json": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.token": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.token_id": true,
+ "google.generativeai.protos.LogprobsResult.Candidate.wrap": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates": false,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__call__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__eq__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__ge__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__gt__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__init__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__le__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__lt__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__ne__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__new__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__or__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.__ror__": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.candidates": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.mro": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.pb": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": true,
+ "google.generativeai.protos.LogprobsResult.TopCandidates.wrap": true,
+ "google.generativeai.protos.LogprobsResult.__call__": true,
+ "google.generativeai.protos.LogprobsResult.__eq__": true,
+ "google.generativeai.protos.LogprobsResult.__ge__": true,
+ "google.generativeai.protos.LogprobsResult.__gt__": true,
+ "google.generativeai.protos.LogprobsResult.__init__": true,
+ "google.generativeai.protos.LogprobsResult.__le__": true,
+ "google.generativeai.protos.LogprobsResult.__lt__": true,
+ "google.generativeai.protos.LogprobsResult.__ne__": true,
+ "google.generativeai.protos.LogprobsResult.__new__": true,
+ "google.generativeai.protos.LogprobsResult.__or__": true,
+ "google.generativeai.protos.LogprobsResult.__ror__": true,
+ "google.generativeai.protos.LogprobsResult.chosen_candidates": true,
+ "google.generativeai.protos.LogprobsResult.copy_from": true,
+ "google.generativeai.protos.LogprobsResult.deserialize": true,
+ "google.generativeai.protos.LogprobsResult.from_json": true,
+ "google.generativeai.protos.LogprobsResult.mro": true,
+ "google.generativeai.protos.LogprobsResult.pb": true,
+ "google.generativeai.protos.LogprobsResult.serialize": true,
+ "google.generativeai.protos.LogprobsResult.to_dict": true,
+ "google.generativeai.protos.LogprobsResult.to_json": true,
+ "google.generativeai.protos.LogprobsResult.top_candidates": true,
+ "google.generativeai.protos.LogprobsResult.wrap": true,
+ "google.generativeai.protos.Message": false,
+ "google.generativeai.protos.Message.__call__": true,
+ "google.generativeai.protos.Message.__eq__": true,
+ "google.generativeai.protos.Message.__ge__": true,
+ "google.generativeai.protos.Message.__gt__": true,
+ "google.generativeai.protos.Message.__init__": true,
+ "google.generativeai.protos.Message.__le__": true,
+ "google.generativeai.protos.Message.__lt__": true,
+ "google.generativeai.protos.Message.__ne__": true,
+ "google.generativeai.protos.Message.__new__": true,
+ "google.generativeai.protos.Message.__or__": true,
+ "google.generativeai.protos.Message.__ror__": true,
+ "google.generativeai.protos.Message.author": true,
+ "google.generativeai.protos.Message.citation_metadata": true,
+ "google.generativeai.protos.Message.content": true,
+ "google.generativeai.protos.Message.copy_from": true,
+ "google.generativeai.protos.Message.deserialize": true,
+ "google.generativeai.protos.Message.from_json": true,
+ "google.generativeai.protos.Message.mro": true,
+ "google.generativeai.protos.Message.pb": true,
+ "google.generativeai.protos.Message.serialize": true,
+ "google.generativeai.protos.Message.to_dict": true,
+ "google.generativeai.protos.Message.to_json": true,
+ "google.generativeai.protos.Message.wrap": true,
+ "google.generativeai.protos.MessagePrompt": false,
+ "google.generativeai.protos.MessagePrompt.__call__": true,
+ "google.generativeai.protos.MessagePrompt.__eq__": true,
+ "google.generativeai.protos.MessagePrompt.__ge__": true,
+ "google.generativeai.protos.MessagePrompt.__gt__": true,
+ "google.generativeai.protos.MessagePrompt.__init__": true,
+ "google.generativeai.protos.MessagePrompt.__le__": true,
+ "google.generativeai.protos.MessagePrompt.__lt__": true,
+ "google.generativeai.protos.MessagePrompt.__ne__": true,
+ "google.generativeai.protos.MessagePrompt.__new__": true,
+ "google.generativeai.protos.MessagePrompt.__or__": true,
+ "google.generativeai.protos.MessagePrompt.__ror__": true,
+ "google.generativeai.protos.MessagePrompt.context": true,
+ "google.generativeai.protos.MessagePrompt.copy_from": true,
+ "google.generativeai.protos.MessagePrompt.deserialize": true,
+ "google.generativeai.protos.MessagePrompt.examples": true,
+ "google.generativeai.protos.MessagePrompt.from_json": true,
+ "google.generativeai.protos.MessagePrompt.messages": true,
+ "google.generativeai.protos.MessagePrompt.mro": true,
+ "google.generativeai.protos.MessagePrompt.pb": true,
+ "google.generativeai.protos.MessagePrompt.serialize": true,
+ "google.generativeai.protos.MessagePrompt.to_dict": true,
+ "google.generativeai.protos.MessagePrompt.to_json": true,
+ "google.generativeai.protos.MessagePrompt.wrap": true,
+ "google.generativeai.protos.MetadataFilter": false,
+ "google.generativeai.protos.MetadataFilter.__call__": true,
+ "google.generativeai.protos.MetadataFilter.__eq__": true,
+ "google.generativeai.protos.MetadataFilter.__ge__": true,
+ "google.generativeai.protos.MetadataFilter.__gt__": true,
+ "google.generativeai.protos.MetadataFilter.__init__": true,
+ "google.generativeai.protos.MetadataFilter.__le__": true,
+ "google.generativeai.protos.MetadataFilter.__lt__": true,
+ "google.generativeai.protos.MetadataFilter.__ne__": true,
+ "google.generativeai.protos.MetadataFilter.__new__": true,
+ "google.generativeai.protos.MetadataFilter.__or__": true,
+ "google.generativeai.protos.MetadataFilter.__ror__": true,
+ "google.generativeai.protos.MetadataFilter.conditions": true,
+ "google.generativeai.protos.MetadataFilter.copy_from": true,
+ "google.generativeai.protos.MetadataFilter.deserialize": true,
+ "google.generativeai.protos.MetadataFilter.from_json": true,
+ "google.generativeai.protos.MetadataFilter.key": true,
+ "google.generativeai.protos.MetadataFilter.mro": true,
+ "google.generativeai.protos.MetadataFilter.pb": true,
+ "google.generativeai.protos.MetadataFilter.serialize": true,
+ "google.generativeai.protos.MetadataFilter.to_dict": true,
+ "google.generativeai.protos.MetadataFilter.to_json": true,
+ "google.generativeai.protos.MetadataFilter.wrap": true,
+ "google.generativeai.protos.Model": false,
+ "google.generativeai.protos.Model.__call__": true,
+ "google.generativeai.protos.Model.__eq__": true,
+ "google.generativeai.protos.Model.__ge__": true,
+ "google.generativeai.protos.Model.__gt__": true,
+ "google.generativeai.protos.Model.__init__": true,
+ "google.generativeai.protos.Model.__le__": true,
+ "google.generativeai.protos.Model.__lt__": true,
+ "google.generativeai.protos.Model.__ne__": true,
+ "google.generativeai.protos.Model.__new__": true,
+ "google.generativeai.protos.Model.__or__": true,
+ "google.generativeai.protos.Model.__ror__": true,
+ "google.generativeai.protos.Model.base_model_id": true,
+ "google.generativeai.protos.Model.copy_from": true,
+ "google.generativeai.protos.Model.description": true,
+ "google.generativeai.protos.Model.deserialize": true,
+ "google.generativeai.protos.Model.display_name": true,
+ "google.generativeai.protos.Model.from_json": true,
+ "google.generativeai.protos.Model.input_token_limit": true,
+ "google.generativeai.protos.Model.max_temperature": true,
+ "google.generativeai.protos.Model.mro": true,
+ "google.generativeai.protos.Model.name": true,
+ "google.generativeai.protos.Model.output_token_limit": true,
+ "google.generativeai.protos.Model.pb": true,
+ "google.generativeai.protos.Model.serialize": true,
+ "google.generativeai.protos.Model.supported_generation_methods": true,
+ "google.generativeai.protos.Model.temperature": true,
+ "google.generativeai.protos.Model.to_dict": true,
+ "google.generativeai.protos.Model.to_json": true,
+ "google.generativeai.protos.Model.top_k": true,
+ "google.generativeai.protos.Model.top_p": true,
+ "google.generativeai.protos.Model.version": true,
+ "google.generativeai.protos.Model.wrap": true,
+ "google.generativeai.protos.Part": false,
+ "google.generativeai.protos.Part.__call__": true,
+ "google.generativeai.protos.Part.__eq__": true,
+ "google.generativeai.protos.Part.__ge__": true,
+ "google.generativeai.protos.Part.__gt__": true,
+ "google.generativeai.protos.Part.__init__": true,
+ "google.generativeai.protos.Part.__le__": true,
+ "google.generativeai.protos.Part.__lt__": true,
+ "google.generativeai.protos.Part.__ne__": true,
+ "google.generativeai.protos.Part.__new__": true,
+ "google.generativeai.protos.Part.__or__": true,
+ "google.generativeai.protos.Part.__ror__": true,
+ "google.generativeai.protos.Part.code_execution_result": true,
+ "google.generativeai.protos.Part.copy_from": true,
+ "google.generativeai.protos.Part.deserialize": true,
+ "google.generativeai.protos.Part.executable_code": true,
+ "google.generativeai.protos.Part.file_data": true,
+ "google.generativeai.protos.Part.from_json": true,
+ "google.generativeai.protos.Part.function_call": true,
+ "google.generativeai.protos.Part.function_response": true,
+ "google.generativeai.protos.Part.inline_data": true,
+ "google.generativeai.protos.Part.mro": true,
+ "google.generativeai.protos.Part.pb": true,
+ "google.generativeai.protos.Part.serialize": true,
+ "google.generativeai.protos.Part.text": true,
+ "google.generativeai.protos.Part.to_dict": true,
+ "google.generativeai.protos.Part.to_json": true,
+ "google.generativeai.protos.Part.wrap": true,
+ "google.generativeai.protos.Permission": false,
+ "google.generativeai.protos.Permission.GranteeType": false,
+ "google.generativeai.protos.Permission.GranteeType.EVERYONE": true,
+ "google.generativeai.protos.Permission.GranteeType.GRANTEE_TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.Permission.GranteeType.GROUP": true,
+ "google.generativeai.protos.Permission.GranteeType.USER": true,
+ "google.generativeai.protos.Permission.GranteeType.__abs__": true,
+ "google.generativeai.protos.Permission.GranteeType.__add__": true,
+ "google.generativeai.protos.Permission.GranteeType.__and__": true,
+ "google.generativeai.protos.Permission.GranteeType.__bool__": true,
+ "google.generativeai.protos.Permission.GranteeType.__contains__": true,
+ "google.generativeai.protos.Permission.GranteeType.__eq__": true,
+ "google.generativeai.protos.Permission.GranteeType.__floordiv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ge__": true,
+ "google.generativeai.protos.Permission.GranteeType.__getitem__": true,
+ "google.generativeai.protos.Permission.GranteeType.__gt__": true,
+ "google.generativeai.protos.Permission.GranteeType.__init__": true,
+ "google.generativeai.protos.Permission.GranteeType.__invert__": true,
+ "google.generativeai.protos.Permission.GranteeType.__iter__": true,
+ "google.generativeai.protos.Permission.GranteeType.__le__": true,
+ "google.generativeai.protos.Permission.GranteeType.__len__": true,
+ "google.generativeai.protos.Permission.GranteeType.__lshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__lt__": true,
+ "google.generativeai.protos.Permission.GranteeType.__mod__": true,
+ "google.generativeai.protos.Permission.GranteeType.__mul__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ne__": true,
+ "google.generativeai.protos.Permission.GranteeType.__neg__": true,
+ "google.generativeai.protos.Permission.GranteeType.__new__": true,
+ "google.generativeai.protos.Permission.GranteeType.__or__": true,
+ "google.generativeai.protos.Permission.GranteeType.__pos__": true,
+ "google.generativeai.protos.Permission.GranteeType.__pow__": true,
+ "google.generativeai.protos.Permission.GranteeType.__radd__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rand__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rfloordiv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rlshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rmod__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rmul__": true,
+ "google.generativeai.protos.Permission.GranteeType.__ror__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rpow__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rrshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rshift__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rsub__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rtruediv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__rxor__": true,
+ "google.generativeai.protos.Permission.GranteeType.__sub__": true,
+ "google.generativeai.protos.Permission.GranteeType.__truediv__": true,
+ "google.generativeai.protos.Permission.GranteeType.__xor__": true,
+ "google.generativeai.protos.Permission.GranteeType.as_integer_ratio": true,
+ "google.generativeai.protos.Permission.GranteeType.bit_count": true,
+ "google.generativeai.protos.Permission.GranteeType.bit_length": true,
+ "google.generativeai.protos.Permission.GranteeType.conjugate": true,
+ "google.generativeai.protos.Permission.GranteeType.denominator": true,
+ "google.generativeai.protos.Permission.GranteeType.from_bytes": true,
+ "google.generativeai.protos.Permission.GranteeType.imag": true,
+ "google.generativeai.protos.Permission.GranteeType.is_integer": true,
+ "google.generativeai.protos.Permission.GranteeType.numerator": true,
+ "google.generativeai.protos.Permission.GranteeType.real": true,
+ "google.generativeai.protos.Permission.GranteeType.to_bytes": true,
+ "google.generativeai.protos.Permission.Role": false,
+ "google.generativeai.protos.Permission.Role.OWNER": true,
+ "google.generativeai.protos.Permission.Role.READER": true,
+ "google.generativeai.protos.Permission.Role.ROLE_UNSPECIFIED": true,
+ "google.generativeai.protos.Permission.Role.WRITER": true,
+ "google.generativeai.protos.Permission.Role.__abs__": true,
+ "google.generativeai.protos.Permission.Role.__add__": true,
+ "google.generativeai.protos.Permission.Role.__and__": true,
+ "google.generativeai.protos.Permission.Role.__bool__": true,
+ "google.generativeai.protos.Permission.Role.__contains__": true,
+ "google.generativeai.protos.Permission.Role.__eq__": true,
+ "google.generativeai.protos.Permission.Role.__floordiv__": true,
+ "google.generativeai.protos.Permission.Role.__ge__": true,
+ "google.generativeai.protos.Permission.Role.__getitem__": true,
+ "google.generativeai.protos.Permission.Role.__gt__": true,
+ "google.generativeai.protos.Permission.Role.__init__": true,
+ "google.generativeai.protos.Permission.Role.__invert__": true,
+ "google.generativeai.protos.Permission.Role.__iter__": true,
+ "google.generativeai.protos.Permission.Role.__le__": true,
+ "google.generativeai.protos.Permission.Role.__len__": true,
+ "google.generativeai.protos.Permission.Role.__lshift__": true,
+ "google.generativeai.protos.Permission.Role.__lt__": true,
+ "google.generativeai.protos.Permission.Role.__mod__": true,
+ "google.generativeai.protos.Permission.Role.__mul__": true,
+ "google.generativeai.protos.Permission.Role.__ne__": true,
+ "google.generativeai.protos.Permission.Role.__neg__": true,
+ "google.generativeai.protos.Permission.Role.__new__": true,
+ "google.generativeai.protos.Permission.Role.__or__": true,
+ "google.generativeai.protos.Permission.Role.__pos__": true,
+ "google.generativeai.protos.Permission.Role.__pow__": true,
+ "google.generativeai.protos.Permission.Role.__radd__": true,
+ "google.generativeai.protos.Permission.Role.__rand__": true,
+ "google.generativeai.protos.Permission.Role.__rfloordiv__": true,
+ "google.generativeai.protos.Permission.Role.__rlshift__": true,
+ "google.generativeai.protos.Permission.Role.__rmod__": true,
+ "google.generativeai.protos.Permission.Role.__rmul__": true,
+ "google.generativeai.protos.Permission.Role.__ror__": true,
+ "google.generativeai.protos.Permission.Role.__rpow__": true,
+ "google.generativeai.protos.Permission.Role.__rrshift__": true,
+ "google.generativeai.protos.Permission.Role.__rshift__": true,
+ "google.generativeai.protos.Permission.Role.__rsub__": true,
+ "google.generativeai.protos.Permission.Role.__rtruediv__": true,
+ "google.generativeai.protos.Permission.Role.__rxor__": true,
+ "google.generativeai.protos.Permission.Role.__sub__": true,
+ "google.generativeai.protos.Permission.Role.__truediv__": true,
+ "google.generativeai.protos.Permission.Role.__xor__": true,
+ "google.generativeai.protos.Permission.Role.as_integer_ratio": true,
+ "google.generativeai.protos.Permission.Role.bit_count": true,
+ "google.generativeai.protos.Permission.Role.bit_length": true,
+ "google.generativeai.protos.Permission.Role.conjugate": true,
+ "google.generativeai.protos.Permission.Role.denominator": true,
+ "google.generativeai.protos.Permission.Role.from_bytes": true,
+ "google.generativeai.protos.Permission.Role.imag": true,
+ "google.generativeai.protos.Permission.Role.is_integer": true,
+ "google.generativeai.protos.Permission.Role.numerator": true,
+ "google.generativeai.protos.Permission.Role.real": true,
+ "google.generativeai.protos.Permission.Role.to_bytes": true,
+ "google.generativeai.protos.Permission.__call__": true,
+ "google.generativeai.protos.Permission.__eq__": true,
+ "google.generativeai.protos.Permission.__ge__": true,
+ "google.generativeai.protos.Permission.__gt__": true,
+ "google.generativeai.protos.Permission.__init__": true,
+ "google.generativeai.protos.Permission.__le__": true,
+ "google.generativeai.protos.Permission.__lt__": true,
+ "google.generativeai.protos.Permission.__ne__": true,
+ "google.generativeai.protos.Permission.__new__": true,
+ "google.generativeai.protos.Permission.__or__": true,
+ "google.generativeai.protos.Permission.__ror__": true,
+ "google.generativeai.protos.Permission.copy_from": true,
+ "google.generativeai.protos.Permission.deserialize": true,
+ "google.generativeai.protos.Permission.email_address": true,
+ "google.generativeai.protos.Permission.from_json": true,
+ "google.generativeai.protos.Permission.grantee_type": true,
+ "google.generativeai.protos.Permission.mro": true,
+ "google.generativeai.protos.Permission.name": true,
+ "google.generativeai.protos.Permission.pb": true,
+ "google.generativeai.protos.Permission.role": true,
+ "google.generativeai.protos.Permission.serialize": true,
+ "google.generativeai.protos.Permission.to_dict": true,
+ "google.generativeai.protos.Permission.to_json": true,
+ "google.generativeai.protos.Permission.wrap": true,
+ "google.generativeai.protos.PredictRequest": false,
+ "google.generativeai.protos.PredictRequest.__call__": true,
+ "google.generativeai.protos.PredictRequest.__eq__": true,
+ "google.generativeai.protos.PredictRequest.__ge__": true,
+ "google.generativeai.protos.PredictRequest.__gt__": true,
+ "google.generativeai.protos.PredictRequest.__init__": true,
+ "google.generativeai.protos.PredictRequest.__le__": true,
+ "google.generativeai.protos.PredictRequest.__lt__": true,
+ "google.generativeai.protos.PredictRequest.__ne__": true,
+ "google.generativeai.protos.PredictRequest.__new__": true,
+ "google.generativeai.protos.PredictRequest.__or__": true,
+ "google.generativeai.protos.PredictRequest.__ror__": true,
+ "google.generativeai.protos.PredictRequest.copy_from": true,
+ "google.generativeai.protos.PredictRequest.deserialize": true,
+ "google.generativeai.protos.PredictRequest.from_json": true,
+ "google.generativeai.protos.PredictRequest.instances": true,
+ "google.generativeai.protos.PredictRequest.model": true,
+ "google.generativeai.protos.PredictRequest.mro": true,
+ "google.generativeai.protos.PredictRequest.parameters": true,
+ "google.generativeai.protos.PredictRequest.pb": true,
+ "google.generativeai.protos.PredictRequest.serialize": true,
+ "google.generativeai.protos.PredictRequest.to_dict": true,
+ "google.generativeai.protos.PredictRequest.to_json": true,
+ "google.generativeai.protos.PredictRequest.wrap": true,
+ "google.generativeai.protos.PredictResponse": false,
+ "google.generativeai.protos.PredictResponse.__call__": true,
+ "google.generativeai.protos.PredictResponse.__eq__": true,
+ "google.generativeai.protos.PredictResponse.__ge__": true,
+ "google.generativeai.protos.PredictResponse.__gt__": true,
+ "google.generativeai.protos.PredictResponse.__init__": true,
+ "google.generativeai.protos.PredictResponse.__le__": true,
+ "google.generativeai.protos.PredictResponse.__lt__": true,
+ "google.generativeai.protos.PredictResponse.__ne__": true,
+ "google.generativeai.protos.PredictResponse.__new__": true,
+ "google.generativeai.protos.PredictResponse.__or__": true,
+ "google.generativeai.protos.PredictResponse.__ror__": true,
+ "google.generativeai.protos.PredictResponse.copy_from": true,
+ "google.generativeai.protos.PredictResponse.deserialize": true,
+ "google.generativeai.protos.PredictResponse.from_json": true,
+ "google.generativeai.protos.PredictResponse.mro": true,
+ "google.generativeai.protos.PredictResponse.pb": true,
+ "google.generativeai.protos.PredictResponse.predictions": true,
+ "google.generativeai.protos.PredictResponse.serialize": true,
+ "google.generativeai.protos.PredictResponse.to_dict": true,
+ "google.generativeai.protos.PredictResponse.to_json": true,
+ "google.generativeai.protos.PredictResponse.wrap": true,
+ "google.generativeai.protos.QueryCorpusRequest": false,
+ "google.generativeai.protos.QueryCorpusRequest.__call__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__eq__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ge__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__gt__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__init__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__le__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__lt__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ne__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__new__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__or__": true,
+ "google.generativeai.protos.QueryCorpusRequest.__ror__": true,
+ "google.generativeai.protos.QueryCorpusRequest.copy_from": true,
+ "google.generativeai.protos.QueryCorpusRequest.deserialize": true,
+ "google.generativeai.protos.QueryCorpusRequest.from_json": true,
+ "google.generativeai.protos.QueryCorpusRequest.metadata_filters": true,
+ "google.generativeai.protos.QueryCorpusRequest.mro": true,
+ "google.generativeai.protos.QueryCorpusRequest.name": true,
+ "google.generativeai.protos.QueryCorpusRequest.pb": true,
+ "google.generativeai.protos.QueryCorpusRequest.query": true,
+ "google.generativeai.protos.QueryCorpusRequest.results_count": true,
+ "google.generativeai.protos.QueryCorpusRequest.serialize": true,
+ "google.generativeai.protos.QueryCorpusRequest.to_dict": true,
+ "google.generativeai.protos.QueryCorpusRequest.to_json": true,
+ "google.generativeai.protos.QueryCorpusRequest.wrap": true,
+ "google.generativeai.protos.QueryCorpusResponse": false,
+ "google.generativeai.protos.QueryCorpusResponse.__call__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__eq__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ge__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__gt__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__init__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__le__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__lt__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ne__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__new__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__or__": true,
+ "google.generativeai.protos.QueryCorpusResponse.__ror__": true,
+ "google.generativeai.protos.QueryCorpusResponse.copy_from": true,
+ "google.generativeai.protos.QueryCorpusResponse.deserialize": true,
+ "google.generativeai.protos.QueryCorpusResponse.from_json": true,
+ "google.generativeai.protos.QueryCorpusResponse.mro": true,
+ "google.generativeai.protos.QueryCorpusResponse.pb": true,
+ "google.generativeai.protos.QueryCorpusResponse.relevant_chunks": true,
+ "google.generativeai.protos.QueryCorpusResponse.serialize": true,
+ "google.generativeai.protos.QueryCorpusResponse.to_dict": true,
+ "google.generativeai.protos.QueryCorpusResponse.to_json": true,
+ "google.generativeai.protos.QueryCorpusResponse.wrap": true,
+ "google.generativeai.protos.QueryDocumentRequest": false,
+ "google.generativeai.protos.QueryDocumentRequest.__call__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__eq__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ge__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__gt__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__init__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__le__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__lt__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ne__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__new__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__or__": true,
+ "google.generativeai.protos.QueryDocumentRequest.__ror__": true,
+ "google.generativeai.protos.QueryDocumentRequest.copy_from": true,
+ "google.generativeai.protos.QueryDocumentRequest.deserialize": true,
+ "google.generativeai.protos.QueryDocumentRequest.from_json": true,
+ "google.generativeai.protos.QueryDocumentRequest.metadata_filters": true,
+ "google.generativeai.protos.QueryDocumentRequest.mro": true,
+ "google.generativeai.protos.QueryDocumentRequest.name": true,
+ "google.generativeai.protos.QueryDocumentRequest.pb": true,
+ "google.generativeai.protos.QueryDocumentRequest.query": true,
+ "google.generativeai.protos.QueryDocumentRequest.results_count": true,
+ "google.generativeai.protos.QueryDocumentRequest.serialize": true,
+ "google.generativeai.protos.QueryDocumentRequest.to_dict": true,
+ "google.generativeai.protos.QueryDocumentRequest.to_json": true,
+ "google.generativeai.protos.QueryDocumentRequest.wrap": true,
+ "google.generativeai.protos.QueryDocumentResponse": false,
+ "google.generativeai.protos.QueryDocumentResponse.__call__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__eq__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ge__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__gt__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__init__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__le__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__lt__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ne__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__new__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__or__": true,
+ "google.generativeai.protos.QueryDocumentResponse.__ror__": true,
+ "google.generativeai.protos.QueryDocumentResponse.copy_from": true,
+ "google.generativeai.protos.QueryDocumentResponse.deserialize": true,
+ "google.generativeai.protos.QueryDocumentResponse.from_json": true,
+ "google.generativeai.protos.QueryDocumentResponse.mro": true,
+ "google.generativeai.protos.QueryDocumentResponse.pb": true,
+ "google.generativeai.protos.QueryDocumentResponse.relevant_chunks": true,
+ "google.generativeai.protos.QueryDocumentResponse.serialize": true,
+ "google.generativeai.protos.QueryDocumentResponse.to_dict": true,
+ "google.generativeai.protos.QueryDocumentResponse.to_json": true,
+ "google.generativeai.protos.QueryDocumentResponse.wrap": true,
+ "google.generativeai.protos.RelevantChunk": false,
+ "google.generativeai.protos.RelevantChunk.__call__": true,
+ "google.generativeai.protos.RelevantChunk.__eq__": true,
+ "google.generativeai.protos.RelevantChunk.__ge__": true,
+ "google.generativeai.protos.RelevantChunk.__gt__": true,
+ "google.generativeai.protos.RelevantChunk.__init__": true,
+ "google.generativeai.protos.RelevantChunk.__le__": true,
+ "google.generativeai.protos.RelevantChunk.__lt__": true,
+ "google.generativeai.protos.RelevantChunk.__ne__": true,
+ "google.generativeai.protos.RelevantChunk.__new__": true,
+ "google.generativeai.protos.RelevantChunk.__or__": true,
+ "google.generativeai.protos.RelevantChunk.__ror__": true,
+ "google.generativeai.protos.RelevantChunk.chunk": true,
+ "google.generativeai.protos.RelevantChunk.chunk_relevance_score": true,
+ "google.generativeai.protos.RelevantChunk.copy_from": true,
+ "google.generativeai.protos.RelevantChunk.deserialize": true,
+ "google.generativeai.protos.RelevantChunk.from_json": true,
+ "google.generativeai.protos.RelevantChunk.mro": true,
+ "google.generativeai.protos.RelevantChunk.pb": true,
+ "google.generativeai.protos.RelevantChunk.serialize": true,
+ "google.generativeai.protos.RelevantChunk.to_dict": true,
+ "google.generativeai.protos.RelevantChunk.to_json": true,
+ "google.generativeai.protos.RelevantChunk.wrap": true,
+ "google.generativeai.protos.RetrievalMetadata": false,
+ "google.generativeai.protos.RetrievalMetadata.__call__": true,
+ "google.generativeai.protos.RetrievalMetadata.__eq__": true,
+ "google.generativeai.protos.RetrievalMetadata.__ge__": true,
+ "google.generativeai.protos.RetrievalMetadata.__gt__": true,
+ "google.generativeai.protos.RetrievalMetadata.__init__": true,
+ "google.generativeai.protos.RetrievalMetadata.__le__": true,
+ "google.generativeai.protos.RetrievalMetadata.__lt__": true,
+ "google.generativeai.protos.RetrievalMetadata.__ne__": true,
+ "google.generativeai.protos.RetrievalMetadata.__new__": true,
+ "google.generativeai.protos.RetrievalMetadata.__or__": true,
+ "google.generativeai.protos.RetrievalMetadata.__ror__": true,
+ "google.generativeai.protos.RetrievalMetadata.copy_from": true,
+ "google.generativeai.protos.RetrievalMetadata.deserialize": true,
+ "google.generativeai.protos.RetrievalMetadata.from_json": true,
+ "google.generativeai.protos.RetrievalMetadata.google_search_dynamic_retrieval_score": true,
+ "google.generativeai.protos.RetrievalMetadata.mro": true,
+ "google.generativeai.protos.RetrievalMetadata.pb": true,
+ "google.generativeai.protos.RetrievalMetadata.serialize": true,
+ "google.generativeai.protos.RetrievalMetadata.to_dict": true,
+ "google.generativeai.protos.RetrievalMetadata.to_json": true,
+ "google.generativeai.protos.RetrievalMetadata.wrap": true,
+ "google.generativeai.protos.SafetyFeedback": false,
+ "google.generativeai.protos.SafetyFeedback.__call__": true,
+ "google.generativeai.protos.SafetyFeedback.__eq__": true,
+ "google.generativeai.protos.SafetyFeedback.__ge__": true,
+ "google.generativeai.protos.SafetyFeedback.__gt__": true,
+ "google.generativeai.protos.SafetyFeedback.__init__": true,
+ "google.generativeai.protos.SafetyFeedback.__le__": true,
+ "google.generativeai.protos.SafetyFeedback.__lt__": true,
+ "google.generativeai.protos.SafetyFeedback.__ne__": true,
+ "google.generativeai.protos.SafetyFeedback.__new__": true,
+ "google.generativeai.protos.SafetyFeedback.__or__": true,
+ "google.generativeai.protos.SafetyFeedback.__ror__": true,
+ "google.generativeai.protos.SafetyFeedback.copy_from": true,
+ "google.generativeai.protos.SafetyFeedback.deserialize": true,
+ "google.generativeai.protos.SafetyFeedback.from_json": true,
+ "google.generativeai.protos.SafetyFeedback.mro": true,
+ "google.generativeai.protos.SafetyFeedback.pb": true,
+ "google.generativeai.protos.SafetyFeedback.rating": true,
+ "google.generativeai.protos.SafetyFeedback.serialize": true,
+ "google.generativeai.protos.SafetyFeedback.setting": true,
+ "google.generativeai.protos.SafetyFeedback.to_dict": true,
+ "google.generativeai.protos.SafetyFeedback.to_json": true,
+ "google.generativeai.protos.SafetyFeedback.wrap": true,
+ "google.generativeai.protos.SafetyRating": false,
+ "google.generativeai.protos.SafetyRating.HarmProbability": false,
+ "google.generativeai.protos.SafetyRating.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.HIGH": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.LOW": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.MEDIUM": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.NEGLIGIBLE": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__abs__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__add__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__and__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__bool__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__contains__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__eq__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__floordiv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ge__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__getitem__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__gt__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__init__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__invert__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__iter__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__le__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__len__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__lt__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mod__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__mul__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ne__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__neg__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__new__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__or__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pos__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__pow__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__radd__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rand__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rfloordiv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rlshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmod__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rmul__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__ror__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rpow__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rrshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rshift__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rsub__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rtruediv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__rxor__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__sub__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__truediv__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.__xor__": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.as_integer_ratio": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_count": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.bit_length": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.conjugate": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.denominator": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.from_bytes": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.imag": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.is_integer": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.numerator": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.real": true,
+ "google.generativeai.protos.SafetyRating.HarmProbability.to_bytes": true,
+ "google.generativeai.protos.SafetyRating.__call__": true,
+ "google.generativeai.protos.SafetyRating.__eq__": true,
+ "google.generativeai.protos.SafetyRating.__ge__": true,
+ "google.generativeai.protos.SafetyRating.__gt__": true,
+ "google.generativeai.protos.SafetyRating.__init__": true,
+ "google.generativeai.protos.SafetyRating.__le__": true,
+ "google.generativeai.protos.SafetyRating.__lt__": true,
+ "google.generativeai.protos.SafetyRating.__ne__": true,
+ "google.generativeai.protos.SafetyRating.__new__": true,
+ "google.generativeai.protos.SafetyRating.__or__": true,
+ "google.generativeai.protos.SafetyRating.__ror__": true,
+ "google.generativeai.protos.SafetyRating.blocked": true,
+ "google.generativeai.protos.SafetyRating.category": true,
+ "google.generativeai.protos.SafetyRating.copy_from": true,
+ "google.generativeai.protos.SafetyRating.deserialize": true,
+ "google.generativeai.protos.SafetyRating.from_json": true,
+ "google.generativeai.protos.SafetyRating.mro": true,
+ "google.generativeai.protos.SafetyRating.pb": true,
+ "google.generativeai.protos.SafetyRating.probability": true,
+ "google.generativeai.protos.SafetyRating.serialize": true,
+ "google.generativeai.protos.SafetyRating.to_dict": true,
+ "google.generativeai.protos.SafetyRating.to_json": true,
+ "google.generativeai.protos.SafetyRating.wrap": true,
+ "google.generativeai.protos.SafetySetting": false,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold": false,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_NONE": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.OFF": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__abs__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__add__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__and__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__bool__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__contains__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__eq__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__floordiv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ge__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__getitem__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__gt__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__init__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__invert__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__iter__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__le__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__len__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__lt__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mod__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__mul__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ne__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__neg__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__new__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__or__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pos__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__pow__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__radd__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rand__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rfloordiv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rlshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmod__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rmul__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__ror__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rpow__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rrshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rshift__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rsub__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rtruediv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__rxor__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__sub__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__truediv__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.__xor__": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.as_integer_ratio": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_count": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.bit_length": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.conjugate": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.denominator": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.from_bytes": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.imag": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.is_integer": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.numerator": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.real": true,
+ "google.generativeai.protos.SafetySetting.HarmBlockThreshold.to_bytes": true,
+ "google.generativeai.protos.SafetySetting.__call__": true,
+ "google.generativeai.protos.SafetySetting.__eq__": true,
+ "google.generativeai.protos.SafetySetting.__ge__": true,
+ "google.generativeai.protos.SafetySetting.__gt__": true,
+ "google.generativeai.protos.SafetySetting.__init__": true,
+ "google.generativeai.protos.SafetySetting.__le__": true,
+ "google.generativeai.protos.SafetySetting.__lt__": true,
+ "google.generativeai.protos.SafetySetting.__ne__": true,
+ "google.generativeai.protos.SafetySetting.__new__": true,
+ "google.generativeai.protos.SafetySetting.__or__": true,
+ "google.generativeai.protos.SafetySetting.__ror__": true,
+ "google.generativeai.protos.SafetySetting.category": true,
+ "google.generativeai.protos.SafetySetting.copy_from": true,
+ "google.generativeai.protos.SafetySetting.deserialize": true,
+ "google.generativeai.protos.SafetySetting.from_json": true,
+ "google.generativeai.protos.SafetySetting.mro": true,
+ "google.generativeai.protos.SafetySetting.pb": true,
+ "google.generativeai.protos.SafetySetting.serialize": true,
+ "google.generativeai.protos.SafetySetting.threshold": true,
+ "google.generativeai.protos.SafetySetting.to_dict": true,
+ "google.generativeai.protos.SafetySetting.to_json": true,
+ "google.generativeai.protos.SafetySetting.wrap": true,
+ "google.generativeai.protos.Schema": false,
+ "google.generativeai.protos.Schema.PropertiesEntry": false,
+ "google.generativeai.protos.Schema.PropertiesEntry.__call__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__eq__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ge__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__gt__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__init__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__le__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__lt__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ne__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__new__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__or__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.__ror__": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.copy_from": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.deserialize": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.from_json": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.key": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.mro": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.pb": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.serialize": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.to_dict": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.to_json": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.value": true,
+ "google.generativeai.protos.Schema.PropertiesEntry.wrap": true,
+ "google.generativeai.protos.Schema.__call__": true,
+ "google.generativeai.protos.Schema.__eq__": true,
+ "google.generativeai.protos.Schema.__ge__": true,
+ "google.generativeai.protos.Schema.__gt__": true,
+ "google.generativeai.protos.Schema.__init__": true,
+ "google.generativeai.protos.Schema.__le__": true,
+ "google.generativeai.protos.Schema.__lt__": true,
+ "google.generativeai.protos.Schema.__ne__": true,
+ "google.generativeai.protos.Schema.__new__": true,
+ "google.generativeai.protos.Schema.__or__": true,
+ "google.generativeai.protos.Schema.__ror__": true,
+ "google.generativeai.protos.Schema.copy_from": true,
+ "google.generativeai.protos.Schema.description": true,
+ "google.generativeai.protos.Schema.deserialize": true,
+ "google.generativeai.protos.Schema.enum": true,
+ "google.generativeai.protos.Schema.format_": true,
+ "google.generativeai.protos.Schema.from_json": true,
+ "google.generativeai.protos.Schema.items": true,
+ "google.generativeai.protos.Schema.max_items": true,
+ "google.generativeai.protos.Schema.min_items": true,
+ "google.generativeai.protos.Schema.mro": true,
+ "google.generativeai.protos.Schema.nullable": true,
+ "google.generativeai.protos.Schema.pb": true,
+ "google.generativeai.protos.Schema.properties": true,
+ "google.generativeai.protos.Schema.required": true,
+ "google.generativeai.protos.Schema.serialize": true,
+ "google.generativeai.protos.Schema.to_dict": true,
+ "google.generativeai.protos.Schema.to_json": true,
+ "google.generativeai.protos.Schema.type_": true,
+ "google.generativeai.protos.Schema.wrap": true,
+ "google.generativeai.protos.SearchEntryPoint": false,
+ "google.generativeai.protos.SearchEntryPoint.__call__": true,
+ "google.generativeai.protos.SearchEntryPoint.__eq__": true,
+ "google.generativeai.protos.SearchEntryPoint.__ge__": true,
+ "google.generativeai.protos.SearchEntryPoint.__gt__": true,
+ "google.generativeai.protos.SearchEntryPoint.__init__": true,
+ "google.generativeai.protos.SearchEntryPoint.__le__": true,
+ "google.generativeai.protos.SearchEntryPoint.__lt__": true,
+ "google.generativeai.protos.SearchEntryPoint.__ne__": true,
+ "google.generativeai.protos.SearchEntryPoint.__new__": true,
+ "google.generativeai.protos.SearchEntryPoint.__or__": true,
+ "google.generativeai.protos.SearchEntryPoint.__ror__": true,
+ "google.generativeai.protos.SearchEntryPoint.copy_from": true,
+ "google.generativeai.protos.SearchEntryPoint.deserialize": true,
+ "google.generativeai.protos.SearchEntryPoint.from_json": true,
+ "google.generativeai.protos.SearchEntryPoint.mro": true,
+ "google.generativeai.protos.SearchEntryPoint.pb": true,
+ "google.generativeai.protos.SearchEntryPoint.rendered_content": true,
+ "google.generativeai.protos.SearchEntryPoint.sdk_blob": true,
+ "google.generativeai.protos.SearchEntryPoint.serialize": true,
+ "google.generativeai.protos.SearchEntryPoint.to_dict": true,
+ "google.generativeai.protos.SearchEntryPoint.to_json": true,
+ "google.generativeai.protos.SearchEntryPoint.wrap": true,
+ "google.generativeai.protos.Segment": false,
+ "google.generativeai.protos.Segment.__call__": true,
+ "google.generativeai.protos.Segment.__eq__": true,
+ "google.generativeai.protos.Segment.__ge__": true,
+ "google.generativeai.protos.Segment.__gt__": true,
+ "google.generativeai.protos.Segment.__init__": true,
+ "google.generativeai.protos.Segment.__le__": true,
+ "google.generativeai.protos.Segment.__lt__": true,
+ "google.generativeai.protos.Segment.__ne__": true,
+ "google.generativeai.protos.Segment.__new__": true,
+ "google.generativeai.protos.Segment.__or__": true,
+ "google.generativeai.protos.Segment.__ror__": true,
+ "google.generativeai.protos.Segment.copy_from": true,
+ "google.generativeai.protos.Segment.deserialize": true,
+ "google.generativeai.protos.Segment.end_index": true,
+ "google.generativeai.protos.Segment.from_json": true,
+ "google.generativeai.protos.Segment.mro": true,
+ "google.generativeai.protos.Segment.part_index": true,
+ "google.generativeai.protos.Segment.pb": true,
+ "google.generativeai.protos.Segment.serialize": true,
+ "google.generativeai.protos.Segment.start_index": true,
+ "google.generativeai.protos.Segment.text": true,
+ "google.generativeai.protos.Segment.to_dict": true,
+ "google.generativeai.protos.Segment.to_json": true,
+ "google.generativeai.protos.Segment.wrap": true,
+ "google.generativeai.protos.SemanticRetrieverConfig": false,
+ "google.generativeai.protos.SemanticRetrieverConfig.__call__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__eq__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ge__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__gt__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__init__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__le__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__lt__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ne__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__new__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__or__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.__ror__": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.copy_from": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.deserialize": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.from_json": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.max_chunks_count": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.metadata_filters": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.minimum_relevance_score": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.mro": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.pb": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.query": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.serialize": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.source": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.to_dict": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.to_json": true,
+ "google.generativeai.protos.SemanticRetrieverConfig.wrap": true,
+ "google.generativeai.protos.StringList": false,
+ "google.generativeai.protos.StringList.__call__": true,
+ "google.generativeai.protos.StringList.__eq__": true,
+ "google.generativeai.protos.StringList.__ge__": true,
+ "google.generativeai.protos.StringList.__gt__": true,
+ "google.generativeai.protos.StringList.__init__": true,
+ "google.generativeai.protos.StringList.__le__": true,
+ "google.generativeai.protos.StringList.__lt__": true,
+ "google.generativeai.protos.StringList.__ne__": true,
+ "google.generativeai.protos.StringList.__new__": true,
+ "google.generativeai.protos.StringList.__or__": true,
+ "google.generativeai.protos.StringList.__ror__": true,
+ "google.generativeai.protos.StringList.copy_from": true,
+ "google.generativeai.protos.StringList.deserialize": true,
+ "google.generativeai.protos.StringList.from_json": true,
+ "google.generativeai.protos.StringList.mro": true,
+ "google.generativeai.protos.StringList.pb": true,
+ "google.generativeai.protos.StringList.serialize": true,
+ "google.generativeai.protos.StringList.to_dict": true,
+ "google.generativeai.protos.StringList.to_json": true,
+ "google.generativeai.protos.StringList.values": true,
+ "google.generativeai.protos.StringList.wrap": true,
+ "google.generativeai.protos.TaskType": false,
+ "google.generativeai.protos.TaskType.CLASSIFICATION": true,
+ "google.generativeai.protos.TaskType.CLUSTERING": true,
+ "google.generativeai.protos.TaskType.FACT_VERIFICATION": true,
+ "google.generativeai.protos.TaskType.QUESTION_ANSWERING": true,
+ "google.generativeai.protos.TaskType.RETRIEVAL_DOCUMENT": true,
+ "google.generativeai.protos.TaskType.RETRIEVAL_QUERY": true,
+ "google.generativeai.protos.TaskType.SEMANTIC_SIMILARITY": true,
+ "google.generativeai.protos.TaskType.TASK_TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.TaskType.__abs__": true,
+ "google.generativeai.protos.TaskType.__add__": true,
+ "google.generativeai.protos.TaskType.__and__": true,
+ "google.generativeai.protos.TaskType.__bool__": true,
+ "google.generativeai.protos.TaskType.__contains__": true,
+ "google.generativeai.protos.TaskType.__eq__": true,
+ "google.generativeai.protos.TaskType.__floordiv__": true,
+ "google.generativeai.protos.TaskType.__ge__": true,
+ "google.generativeai.protos.TaskType.__getitem__": true,
+ "google.generativeai.protos.TaskType.__gt__": true,
+ "google.generativeai.protos.TaskType.__init__": true,
+ "google.generativeai.protos.TaskType.__invert__": true,
+ "google.generativeai.protos.TaskType.__iter__": true,
+ "google.generativeai.protos.TaskType.__le__": true,
+ "google.generativeai.protos.TaskType.__len__": true,
+ "google.generativeai.protos.TaskType.__lshift__": true,
+ "google.generativeai.protos.TaskType.__lt__": true,
+ "google.generativeai.protos.TaskType.__mod__": true,
+ "google.generativeai.protos.TaskType.__mul__": true,
+ "google.generativeai.protos.TaskType.__ne__": true,
+ "google.generativeai.protos.TaskType.__neg__": true,
+ "google.generativeai.protos.TaskType.__new__": true,
+ "google.generativeai.protos.TaskType.__or__": true,
+ "google.generativeai.protos.TaskType.__pos__": true,
+ "google.generativeai.protos.TaskType.__pow__": true,
+ "google.generativeai.protos.TaskType.__radd__": true,
+ "google.generativeai.protos.TaskType.__rand__": true,
+ "google.generativeai.protos.TaskType.__rfloordiv__": true,
+ "google.generativeai.protos.TaskType.__rlshift__": true,
+ "google.generativeai.protos.TaskType.__rmod__": true,
+ "google.generativeai.protos.TaskType.__rmul__": true,
+ "google.generativeai.protos.TaskType.__ror__": true,
+ "google.generativeai.protos.TaskType.__rpow__": true,
+ "google.generativeai.protos.TaskType.__rrshift__": true,
+ "google.generativeai.protos.TaskType.__rshift__": true,
+ "google.generativeai.protos.TaskType.__rsub__": true,
+ "google.generativeai.protos.TaskType.__rtruediv__": true,
+ "google.generativeai.protos.TaskType.__rxor__": true,
+ "google.generativeai.protos.TaskType.__sub__": true,
+ "google.generativeai.protos.TaskType.__truediv__": true,
+ "google.generativeai.protos.TaskType.__xor__": true,
+ "google.generativeai.protos.TaskType.as_integer_ratio": true,
+ "google.generativeai.protos.TaskType.bit_count": true,
+ "google.generativeai.protos.TaskType.bit_length": true,
+ "google.generativeai.protos.TaskType.conjugate": true,
+ "google.generativeai.protos.TaskType.denominator": true,
+ "google.generativeai.protos.TaskType.from_bytes": true,
+ "google.generativeai.protos.TaskType.imag": true,
+ "google.generativeai.protos.TaskType.is_integer": true,
+ "google.generativeai.protos.TaskType.numerator": true,
+ "google.generativeai.protos.TaskType.real": true,
+ "google.generativeai.protos.TaskType.to_bytes": true,
+ "google.generativeai.protos.TextCompletion": false,
+ "google.generativeai.protos.TextCompletion.__call__": true,
+ "google.generativeai.protos.TextCompletion.__eq__": true,
+ "google.generativeai.protos.TextCompletion.__ge__": true,
+ "google.generativeai.protos.TextCompletion.__gt__": true,
+ "google.generativeai.protos.TextCompletion.__init__": true,
+ "google.generativeai.protos.TextCompletion.__le__": true,
+ "google.generativeai.protos.TextCompletion.__lt__": true,
+ "google.generativeai.protos.TextCompletion.__ne__": true,
+ "google.generativeai.protos.TextCompletion.__new__": true,
+ "google.generativeai.protos.TextCompletion.__or__": true,
+ "google.generativeai.protos.TextCompletion.__ror__": true,
+ "google.generativeai.protos.TextCompletion.citation_metadata": true,
+ "google.generativeai.protos.TextCompletion.copy_from": true,
+ "google.generativeai.protos.TextCompletion.deserialize": true,
+ "google.generativeai.protos.TextCompletion.from_json": true,
+ "google.generativeai.protos.TextCompletion.mro": true,
+ "google.generativeai.protos.TextCompletion.output": true,
+ "google.generativeai.protos.TextCompletion.pb": true,
+ "google.generativeai.protos.TextCompletion.safety_ratings": true,
+ "google.generativeai.protos.TextCompletion.serialize": true,
+ "google.generativeai.protos.TextCompletion.to_dict": true,
+ "google.generativeai.protos.TextCompletion.to_json": true,
+ "google.generativeai.protos.TextCompletion.wrap": true,
+ "google.generativeai.protos.TextPrompt": false,
+ "google.generativeai.protos.TextPrompt.__call__": true,
+ "google.generativeai.protos.TextPrompt.__eq__": true,
+ "google.generativeai.protos.TextPrompt.__ge__": true,
+ "google.generativeai.protos.TextPrompt.__gt__": true,
+ "google.generativeai.protos.TextPrompt.__init__": true,
+ "google.generativeai.protos.TextPrompt.__le__": true,
+ "google.generativeai.protos.TextPrompt.__lt__": true,
+ "google.generativeai.protos.TextPrompt.__ne__": true,
+ "google.generativeai.protos.TextPrompt.__new__": true,
+ "google.generativeai.protos.TextPrompt.__or__": true,
+ "google.generativeai.protos.TextPrompt.__ror__": true,
+ "google.generativeai.protos.TextPrompt.copy_from": true,
+ "google.generativeai.protos.TextPrompt.deserialize": true,
+ "google.generativeai.protos.TextPrompt.from_json": true,
+ "google.generativeai.protos.TextPrompt.mro": true,
+ "google.generativeai.protos.TextPrompt.pb": true,
+ "google.generativeai.protos.TextPrompt.serialize": true,
+ "google.generativeai.protos.TextPrompt.text": true,
+ "google.generativeai.protos.TextPrompt.to_dict": true,
+ "google.generativeai.protos.TextPrompt.to_json": true,
+ "google.generativeai.protos.TextPrompt.wrap": true,
+ "google.generativeai.protos.Tool": false,
+ "google.generativeai.protos.Tool.__call__": true,
+ "google.generativeai.protos.Tool.__eq__": true,
+ "google.generativeai.protos.Tool.__ge__": true,
+ "google.generativeai.protos.Tool.__gt__": true,
+ "google.generativeai.protos.Tool.__init__": true,
+ "google.generativeai.protos.Tool.__le__": true,
+ "google.generativeai.protos.Tool.__lt__": true,
+ "google.generativeai.protos.Tool.__ne__": true,
+ "google.generativeai.protos.Tool.__new__": true,
+ "google.generativeai.protos.Tool.__or__": true,
+ "google.generativeai.protos.Tool.__ror__": true,
+ "google.generativeai.protos.Tool.code_execution": true,
+ "google.generativeai.protos.Tool.copy_from": true,
+ "google.generativeai.protos.Tool.deserialize": true,
+ "google.generativeai.protos.Tool.from_json": true,
+ "google.generativeai.protos.Tool.function_declarations": true,
+ "google.generativeai.protos.Tool.google_search_retrieval": true,
+ "google.generativeai.protos.Tool.mro": true,
+ "google.generativeai.protos.Tool.pb": true,
+ "google.generativeai.protos.Tool.serialize": true,
+ "google.generativeai.protos.Tool.to_dict": true,
+ "google.generativeai.protos.Tool.to_json": true,
+ "google.generativeai.protos.Tool.wrap": true,
+ "google.generativeai.protos.ToolConfig": false,
+ "google.generativeai.protos.ToolConfig.__call__": true,
+ "google.generativeai.protos.ToolConfig.__eq__": true,
+ "google.generativeai.protos.ToolConfig.__ge__": true,
+ "google.generativeai.protos.ToolConfig.__gt__": true,
+ "google.generativeai.protos.ToolConfig.__init__": true,
+ "google.generativeai.protos.ToolConfig.__le__": true,
+ "google.generativeai.protos.ToolConfig.__lt__": true,
+ "google.generativeai.protos.ToolConfig.__ne__": true,
+ "google.generativeai.protos.ToolConfig.__new__": true,
+ "google.generativeai.protos.ToolConfig.__or__": true,
+ "google.generativeai.protos.ToolConfig.__ror__": true,
+ "google.generativeai.protos.ToolConfig.copy_from": true,
+ "google.generativeai.protos.ToolConfig.deserialize": true,
+ "google.generativeai.protos.ToolConfig.from_json": true,
+ "google.generativeai.protos.ToolConfig.function_calling_config": true,
+ "google.generativeai.protos.ToolConfig.mro": true,
+ "google.generativeai.protos.ToolConfig.pb": true,
+ "google.generativeai.protos.ToolConfig.serialize": true,
+ "google.generativeai.protos.ToolConfig.to_dict": true,
+ "google.generativeai.protos.ToolConfig.to_json": true,
+ "google.generativeai.protos.ToolConfig.wrap": true,
+ "google.generativeai.protos.TransferOwnershipRequest": false,
+ "google.generativeai.protos.TransferOwnershipRequest.__call__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__eq__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ge__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__gt__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__init__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__le__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__lt__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ne__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__new__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__or__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.__ror__": true,
+ "google.generativeai.protos.TransferOwnershipRequest.copy_from": true,
+ "google.generativeai.protos.TransferOwnershipRequest.deserialize": true,
+ "google.generativeai.protos.TransferOwnershipRequest.email_address": true,
+ "google.generativeai.protos.TransferOwnershipRequest.from_json": true,
+ "google.generativeai.protos.TransferOwnershipRequest.mro": true,
+ "google.generativeai.protos.TransferOwnershipRequest.name": true,
+ "google.generativeai.protos.TransferOwnershipRequest.pb": true,
+ "google.generativeai.protos.TransferOwnershipRequest.serialize": true,
+ "google.generativeai.protos.TransferOwnershipRequest.to_dict": true,
+ "google.generativeai.protos.TransferOwnershipRequest.to_json": true,
+ "google.generativeai.protos.TransferOwnershipRequest.wrap": true,
+ "google.generativeai.protos.TransferOwnershipResponse": false,
+ "google.generativeai.protos.TransferOwnershipResponse.__call__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__eq__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ge__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__gt__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__init__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__le__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__lt__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ne__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__new__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__or__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.__ror__": true,
+ "google.generativeai.protos.TransferOwnershipResponse.copy_from": true,
+ "google.generativeai.protos.TransferOwnershipResponse.deserialize": true,
+ "google.generativeai.protos.TransferOwnershipResponse.from_json": true,
+ "google.generativeai.protos.TransferOwnershipResponse.mro": true,
+ "google.generativeai.protos.TransferOwnershipResponse.pb": true,
+ "google.generativeai.protos.TransferOwnershipResponse.serialize": true,
+ "google.generativeai.protos.TransferOwnershipResponse.to_dict": true,
+ "google.generativeai.protos.TransferOwnershipResponse.to_json": true,
+ "google.generativeai.protos.TransferOwnershipResponse.wrap": true,
+ "google.generativeai.protos.TunedModel": false,
+ "google.generativeai.protos.TunedModel.State": false,
+ "google.generativeai.protos.TunedModel.State.ACTIVE": true,
+ "google.generativeai.protos.TunedModel.State.CREATING": true,
+ "google.generativeai.protos.TunedModel.State.FAILED": true,
+ "google.generativeai.protos.TunedModel.State.STATE_UNSPECIFIED": true,
+ "google.generativeai.protos.TunedModel.State.__abs__": true,
+ "google.generativeai.protos.TunedModel.State.__add__": true,
+ "google.generativeai.protos.TunedModel.State.__and__": true,
+ "google.generativeai.protos.TunedModel.State.__bool__": true,
+ "google.generativeai.protos.TunedModel.State.__contains__": true,
+ "google.generativeai.protos.TunedModel.State.__eq__": true,
+ "google.generativeai.protos.TunedModel.State.__floordiv__": true,
+ "google.generativeai.protos.TunedModel.State.__ge__": true,
+ "google.generativeai.protos.TunedModel.State.__getitem__": true,
+ "google.generativeai.protos.TunedModel.State.__gt__": true,
+ "google.generativeai.protos.TunedModel.State.__init__": true,
+ "google.generativeai.protos.TunedModel.State.__invert__": true,
+ "google.generativeai.protos.TunedModel.State.__iter__": true,
+ "google.generativeai.protos.TunedModel.State.__le__": true,
+ "google.generativeai.protos.TunedModel.State.__len__": true,
+ "google.generativeai.protos.TunedModel.State.__lshift__": true,
+ "google.generativeai.protos.TunedModel.State.__lt__": true,
+ "google.generativeai.protos.TunedModel.State.__mod__": true,
+ "google.generativeai.protos.TunedModel.State.__mul__": true,
+ "google.generativeai.protos.TunedModel.State.__ne__": true,
+ "google.generativeai.protos.TunedModel.State.__neg__": true,
+ "google.generativeai.protos.TunedModel.State.__new__": true,
+ "google.generativeai.protos.TunedModel.State.__or__": true,
+ "google.generativeai.protos.TunedModel.State.__pos__": true,
+ "google.generativeai.protos.TunedModel.State.__pow__": true,
+ "google.generativeai.protos.TunedModel.State.__radd__": true,
+ "google.generativeai.protos.TunedModel.State.__rand__": true,
+ "google.generativeai.protos.TunedModel.State.__rfloordiv__": true,
+ "google.generativeai.protos.TunedModel.State.__rlshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rmod__": true,
+ "google.generativeai.protos.TunedModel.State.__rmul__": true,
+ "google.generativeai.protos.TunedModel.State.__ror__": true,
+ "google.generativeai.protos.TunedModel.State.__rpow__": true,
+ "google.generativeai.protos.TunedModel.State.__rrshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rshift__": true,
+ "google.generativeai.protos.TunedModel.State.__rsub__": true,
+ "google.generativeai.protos.TunedModel.State.__rtruediv__": true,
+ "google.generativeai.protos.TunedModel.State.__rxor__": true,
+ "google.generativeai.protos.TunedModel.State.__sub__": true,
+ "google.generativeai.protos.TunedModel.State.__truediv__": true,
+ "google.generativeai.protos.TunedModel.State.__xor__": true,
+ "google.generativeai.protos.TunedModel.State.as_integer_ratio": true,
+ "google.generativeai.protos.TunedModel.State.bit_count": true,
+ "google.generativeai.protos.TunedModel.State.bit_length": true,
+ "google.generativeai.protos.TunedModel.State.conjugate": true,
+ "google.generativeai.protos.TunedModel.State.denominator": true,
+ "google.generativeai.protos.TunedModel.State.from_bytes": true,
+ "google.generativeai.protos.TunedModel.State.imag": true,
+ "google.generativeai.protos.TunedModel.State.is_integer": true,
+ "google.generativeai.protos.TunedModel.State.numerator": true,
+ "google.generativeai.protos.TunedModel.State.real": true,
+ "google.generativeai.protos.TunedModel.State.to_bytes": true,
+ "google.generativeai.protos.TunedModel.__call__": true,
+ "google.generativeai.protos.TunedModel.__eq__": true,
+ "google.generativeai.protos.TunedModel.__ge__": true,
+ "google.generativeai.protos.TunedModel.__gt__": true,
+ "google.generativeai.protos.TunedModel.__init__": true,
+ "google.generativeai.protos.TunedModel.__le__": true,
+ "google.generativeai.protos.TunedModel.__lt__": true,
+ "google.generativeai.protos.TunedModel.__ne__": true,
+ "google.generativeai.protos.TunedModel.__new__": true,
+ "google.generativeai.protos.TunedModel.__or__": true,
+ "google.generativeai.protos.TunedModel.__ror__": true,
+ "google.generativeai.protos.TunedModel.base_model": true,
+ "google.generativeai.protos.TunedModel.copy_from": true,
+ "google.generativeai.protos.TunedModel.create_time": true,
+ "google.generativeai.protos.TunedModel.description": true,
+ "google.generativeai.protos.TunedModel.deserialize": true,
+ "google.generativeai.protos.TunedModel.display_name": true,
+ "google.generativeai.protos.TunedModel.from_json": true,
+ "google.generativeai.protos.TunedModel.mro": true,
+ "google.generativeai.protos.TunedModel.name": true,
+ "google.generativeai.protos.TunedModel.pb": true,
+ "google.generativeai.protos.TunedModel.reader_project_numbers": true,
+ "google.generativeai.protos.TunedModel.serialize": true,
+ "google.generativeai.protos.TunedModel.state": true,
+ "google.generativeai.protos.TunedModel.temperature": true,
+ "google.generativeai.protos.TunedModel.to_dict": true,
+ "google.generativeai.protos.TunedModel.to_json": true,
+ "google.generativeai.protos.TunedModel.top_k": true,
+ "google.generativeai.protos.TunedModel.top_p": true,
+ "google.generativeai.protos.TunedModel.tuned_model_source": true,
+ "google.generativeai.protos.TunedModel.tuning_task": true,
+ "google.generativeai.protos.TunedModel.update_time": true,
+ "google.generativeai.protos.TunedModel.wrap": true,
+ "google.generativeai.protos.TunedModelSource": false,
+ "google.generativeai.protos.TunedModelSource.__call__": true,
+ "google.generativeai.protos.TunedModelSource.__eq__": true,
+ "google.generativeai.protos.TunedModelSource.__ge__": true,
+ "google.generativeai.protos.TunedModelSource.__gt__": true,
+ "google.generativeai.protos.TunedModelSource.__init__": true,
+ "google.generativeai.protos.TunedModelSource.__le__": true,
+ "google.generativeai.protos.TunedModelSource.__lt__": true,
+ "google.generativeai.protos.TunedModelSource.__ne__": true,
+ "google.generativeai.protos.TunedModelSource.__new__": true,
+ "google.generativeai.protos.TunedModelSource.__or__": true,
+ "google.generativeai.protos.TunedModelSource.__ror__": true,
+ "google.generativeai.protos.TunedModelSource.base_model": true,
+ "google.generativeai.protos.TunedModelSource.copy_from": true,
+ "google.generativeai.protos.TunedModelSource.deserialize": true,
+ "google.generativeai.protos.TunedModelSource.from_json": true,
+ "google.generativeai.protos.TunedModelSource.mro": true,
+ "google.generativeai.protos.TunedModelSource.pb": true,
+ "google.generativeai.protos.TunedModelSource.serialize": true,
+ "google.generativeai.protos.TunedModelSource.to_dict": true,
+ "google.generativeai.protos.TunedModelSource.to_json": true,
+ "google.generativeai.protos.TunedModelSource.tuned_model": true,
+ "google.generativeai.protos.TunedModelSource.wrap": true,
+ "google.generativeai.protos.TuningExample": false,
+ "google.generativeai.protos.TuningExample.__call__": true,
+ "google.generativeai.protos.TuningExample.__eq__": true,
+ "google.generativeai.protos.TuningExample.__ge__": true,
+ "google.generativeai.protos.TuningExample.__gt__": true,
+ "google.generativeai.protos.TuningExample.__init__": true,
+ "google.generativeai.protos.TuningExample.__le__": true,
+ "google.generativeai.protos.TuningExample.__lt__": true,
+ "google.generativeai.protos.TuningExample.__ne__": true,
+ "google.generativeai.protos.TuningExample.__new__": true,
+ "google.generativeai.protos.TuningExample.__or__": true,
+ "google.generativeai.protos.TuningExample.__ror__": true,
+ "google.generativeai.protos.TuningExample.copy_from": true,
+ "google.generativeai.protos.TuningExample.deserialize": true,
+ "google.generativeai.protos.TuningExample.from_json": true,
+ "google.generativeai.protos.TuningExample.mro": true,
+ "google.generativeai.protos.TuningExample.output": true,
+ "google.generativeai.protos.TuningExample.pb": true,
+ "google.generativeai.protos.TuningExample.serialize": true,
+ "google.generativeai.protos.TuningExample.text_input": true,
+ "google.generativeai.protos.TuningExample.to_dict": true,
+ "google.generativeai.protos.TuningExample.to_json": true,
+ "google.generativeai.protos.TuningExample.wrap": true,
+ "google.generativeai.protos.TuningExamples": false,
+ "google.generativeai.protos.TuningExamples.__call__": true,
+ "google.generativeai.protos.TuningExamples.__eq__": true,
+ "google.generativeai.protos.TuningExamples.__ge__": true,
+ "google.generativeai.protos.TuningExamples.__gt__": true,
+ "google.generativeai.protos.TuningExamples.__init__": true,
+ "google.generativeai.protos.TuningExamples.__le__": true,
+ "google.generativeai.protos.TuningExamples.__lt__": true,
+ "google.generativeai.protos.TuningExamples.__ne__": true,
+ "google.generativeai.protos.TuningExamples.__new__": true,
+ "google.generativeai.protos.TuningExamples.__or__": true,
+ "google.generativeai.protos.TuningExamples.__ror__": true,
+ "google.generativeai.protos.TuningExamples.copy_from": true,
+ "google.generativeai.protos.TuningExamples.deserialize": true,
+ "google.generativeai.protos.TuningExamples.examples": true,
+ "google.generativeai.protos.TuningExamples.from_json": true,
+ "google.generativeai.protos.TuningExamples.mro": true,
+ "google.generativeai.protos.TuningExamples.pb": true,
+ "google.generativeai.protos.TuningExamples.serialize": true,
+ "google.generativeai.protos.TuningExamples.to_dict": true,
+ "google.generativeai.protos.TuningExamples.to_json": true,
+ "google.generativeai.protos.TuningExamples.wrap": true,
+ "google.generativeai.protos.TuningSnapshot": false,
+ "google.generativeai.protos.TuningSnapshot.__call__": true,
+ "google.generativeai.protos.TuningSnapshot.__eq__": true,
+ "google.generativeai.protos.TuningSnapshot.__ge__": true,
+ "google.generativeai.protos.TuningSnapshot.__gt__": true,
+ "google.generativeai.protos.TuningSnapshot.__init__": true,
+ "google.generativeai.protos.TuningSnapshot.__le__": true,
+ "google.generativeai.protos.TuningSnapshot.__lt__": true,
+ "google.generativeai.protos.TuningSnapshot.__ne__": true,
+ "google.generativeai.protos.TuningSnapshot.__new__": true,
+ "google.generativeai.protos.TuningSnapshot.__or__": true,
+ "google.generativeai.protos.TuningSnapshot.__ror__": true,
+ "google.generativeai.protos.TuningSnapshot.compute_time": true,
+ "google.generativeai.protos.TuningSnapshot.copy_from": true,
+ "google.generativeai.protos.TuningSnapshot.deserialize": true,
+ "google.generativeai.protos.TuningSnapshot.epoch": true,
+ "google.generativeai.protos.TuningSnapshot.from_json": true,
+ "google.generativeai.protos.TuningSnapshot.mean_loss": true,
+ "google.generativeai.protos.TuningSnapshot.mro": true,
+ "google.generativeai.protos.TuningSnapshot.pb": true,
+ "google.generativeai.protos.TuningSnapshot.serialize": true,
+ "google.generativeai.protos.TuningSnapshot.step": true,
+ "google.generativeai.protos.TuningSnapshot.to_dict": true,
+ "google.generativeai.protos.TuningSnapshot.to_json": true,
+ "google.generativeai.protos.TuningSnapshot.wrap": true,
+ "google.generativeai.protos.TuningTask": false,
+ "google.generativeai.protos.TuningTask.__call__": true,
+ "google.generativeai.protos.TuningTask.__eq__": true,
+ "google.generativeai.protos.TuningTask.__ge__": true,
+ "google.generativeai.protos.TuningTask.__gt__": true,
+ "google.generativeai.protos.TuningTask.__init__": true,
+ "google.generativeai.protos.TuningTask.__le__": true,
+ "google.generativeai.protos.TuningTask.__lt__": true,
+ "google.generativeai.protos.TuningTask.__ne__": true,
+ "google.generativeai.protos.TuningTask.__new__": true,
+ "google.generativeai.protos.TuningTask.__or__": true,
+ "google.generativeai.protos.TuningTask.__ror__": true,
+ "google.generativeai.protos.TuningTask.complete_time": true,
+ "google.generativeai.protos.TuningTask.copy_from": true,
+ "google.generativeai.protos.TuningTask.deserialize": true,
+ "google.generativeai.protos.TuningTask.from_json": true,
+ "google.generativeai.protos.TuningTask.hyperparameters": true,
+ "google.generativeai.protos.TuningTask.mro": true,
+ "google.generativeai.protos.TuningTask.pb": true,
+ "google.generativeai.protos.TuningTask.serialize": true,
+ "google.generativeai.protos.TuningTask.snapshots": true,
+ "google.generativeai.protos.TuningTask.start_time": true,
+ "google.generativeai.protos.TuningTask.to_dict": true,
+ "google.generativeai.protos.TuningTask.to_json": true,
+ "google.generativeai.protos.TuningTask.training_data": true,
+ "google.generativeai.protos.TuningTask.wrap": true,
+ "google.generativeai.protos.Type": false,
+ "google.generativeai.protos.Type.ARRAY": true,
+ "google.generativeai.protos.Type.BOOLEAN": true,
+ "google.generativeai.protos.Type.INTEGER": true,
+ "google.generativeai.protos.Type.NUMBER": true,
+ "google.generativeai.protos.Type.OBJECT": true,
+ "google.generativeai.protos.Type.STRING": true,
+ "google.generativeai.protos.Type.TYPE_UNSPECIFIED": true,
+ "google.generativeai.protos.Type.__abs__": true,
+ "google.generativeai.protos.Type.__add__": true,
+ "google.generativeai.protos.Type.__and__": true,
+ "google.generativeai.protos.Type.__bool__": true,
+ "google.generativeai.protos.Type.__contains__": true,
+ "google.generativeai.protos.Type.__eq__": true,
+ "google.generativeai.protos.Type.__floordiv__": true,
+ "google.generativeai.protos.Type.__ge__": true,
+ "google.generativeai.protos.Type.__getitem__": true,
+ "google.generativeai.protos.Type.__gt__": true,
+ "google.generativeai.protos.Type.__init__": true,
+ "google.generativeai.protos.Type.__invert__": true,
+ "google.generativeai.protos.Type.__iter__": true,
+ "google.generativeai.protos.Type.__le__": true,
+ "google.generativeai.protos.Type.__len__": true,
+ "google.generativeai.protos.Type.__lshift__": true,
+ "google.generativeai.protos.Type.__lt__": true,
+ "google.generativeai.protos.Type.__mod__": true,
+ "google.generativeai.protos.Type.__mul__": true,
+ "google.generativeai.protos.Type.__ne__": true,
+ "google.generativeai.protos.Type.__neg__": true,
+ "google.generativeai.protos.Type.__new__": true,
+ "google.generativeai.protos.Type.__or__": true,
+ "google.generativeai.protos.Type.__pos__": true,
+ "google.generativeai.protos.Type.__pow__": true,
+ "google.generativeai.protos.Type.__radd__": true,
+ "google.generativeai.protos.Type.__rand__": true,
+ "google.generativeai.protos.Type.__rfloordiv__": true,
+ "google.generativeai.protos.Type.__rlshift__": true,
+ "google.generativeai.protos.Type.__rmod__": true,
+ "google.generativeai.protos.Type.__rmul__": true,
+ "google.generativeai.protos.Type.__ror__": true,
+ "google.generativeai.protos.Type.__rpow__": true,
+ "google.generativeai.protos.Type.__rrshift__": true,
+ "google.generativeai.protos.Type.__rshift__": true,
+ "google.generativeai.protos.Type.__rsub__": true,
+ "google.generativeai.protos.Type.__rtruediv__": true,
+ "google.generativeai.protos.Type.__rxor__": true,
+ "google.generativeai.protos.Type.__sub__": true,
+ "google.generativeai.protos.Type.__truediv__": true,
+ "google.generativeai.protos.Type.__xor__": true,
+ "google.generativeai.protos.Type.as_integer_ratio": true,
+ "google.generativeai.protos.Type.bit_count": true,
+ "google.generativeai.protos.Type.bit_length": true,
+ "google.generativeai.protos.Type.conjugate": true,
+ "google.generativeai.protos.Type.denominator": true,
+ "google.generativeai.protos.Type.from_bytes": true,
+ "google.generativeai.protos.Type.imag": true,
+ "google.generativeai.protos.Type.is_integer": true,
+ "google.generativeai.protos.Type.numerator": true,
+ "google.generativeai.protos.Type.real": true,
+ "google.generativeai.protos.Type.to_bytes": true,
+ "google.generativeai.protos.UpdateCachedContentRequest": false,
+ "google.generativeai.protos.UpdateCachedContentRequest.__call__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__eq__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ge__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__gt__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__init__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__le__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__lt__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ne__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__new__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__or__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.__ror__": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.cached_content": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.copy_from": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.deserialize": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.from_json": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.mro": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.pb": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.serialize": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.to_dict": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.to_json": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.update_mask": true,
+ "google.generativeai.protos.UpdateCachedContentRequest.wrap": true,
+ "google.generativeai.protos.UpdateChunkRequest": false,
+ "google.generativeai.protos.UpdateChunkRequest.__call__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__eq__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ge__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__gt__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__init__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__le__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__lt__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ne__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__new__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__or__": true,
+ "google.generativeai.protos.UpdateChunkRequest.__ror__": true,
+ "google.generativeai.protos.UpdateChunkRequest.chunk": true,
+ "google.generativeai.protos.UpdateChunkRequest.copy_from": true,
+ "google.generativeai.protos.UpdateChunkRequest.deserialize": true,
+ "google.generativeai.protos.UpdateChunkRequest.from_json": true,
+ "google.generativeai.protos.UpdateChunkRequest.mro": true,
+ "google.generativeai.protos.UpdateChunkRequest.pb": true,
+ "google.generativeai.protos.UpdateChunkRequest.serialize": true,
+ "google.generativeai.protos.UpdateChunkRequest.to_dict": true,
+ "google.generativeai.protos.UpdateChunkRequest.to_json": true,
+ "google.generativeai.protos.UpdateChunkRequest.update_mask": true,
+ "google.generativeai.protos.UpdateChunkRequest.wrap": true,
+ "google.generativeai.protos.UpdateCorpusRequest": false,
+ "google.generativeai.protos.UpdateCorpusRequest.__call__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__eq__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ge__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__gt__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__init__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__le__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__lt__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ne__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__new__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__or__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.__ror__": true,
+ "google.generativeai.protos.UpdateCorpusRequest.copy_from": true,
+ "google.generativeai.protos.UpdateCorpusRequest.corpus": true,
+ "google.generativeai.protos.UpdateCorpusRequest.deserialize": true,
+ "google.generativeai.protos.UpdateCorpusRequest.from_json": true,
+ "google.generativeai.protos.UpdateCorpusRequest.mro": true,
+ "google.generativeai.protos.UpdateCorpusRequest.pb": true,
+ "google.generativeai.protos.UpdateCorpusRequest.serialize": true,
+ "google.generativeai.protos.UpdateCorpusRequest.to_dict": true,
+ "google.generativeai.protos.UpdateCorpusRequest.to_json": true,
+ "google.generativeai.protos.UpdateCorpusRequest.update_mask": true,
+ "google.generativeai.protos.UpdateCorpusRequest.wrap": true,
+ "google.generativeai.protos.UpdateDocumentRequest": false,
+ "google.generativeai.protos.UpdateDocumentRequest.__call__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__eq__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ge__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__gt__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__init__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__le__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__lt__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ne__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__new__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__or__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.__ror__": true,
+ "google.generativeai.protos.UpdateDocumentRequest.copy_from": true,
+ "google.generativeai.protos.UpdateDocumentRequest.deserialize": true,
+ "google.generativeai.protos.UpdateDocumentRequest.document": true,
+ "google.generativeai.protos.UpdateDocumentRequest.from_json": true,
+ "google.generativeai.protos.UpdateDocumentRequest.mro": true,
+ "google.generativeai.protos.UpdateDocumentRequest.pb": true,
+ "google.generativeai.protos.UpdateDocumentRequest.serialize": true,
+ "google.generativeai.protos.UpdateDocumentRequest.to_dict": true,
+ "google.generativeai.protos.UpdateDocumentRequest.to_json": true,
+ "google.generativeai.protos.UpdateDocumentRequest.update_mask": true,
+ "google.generativeai.protos.UpdateDocumentRequest.wrap": true,
+ "google.generativeai.protos.UpdatePermissionRequest": false,
+ "google.generativeai.protos.UpdatePermissionRequest.__call__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__eq__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ge__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__gt__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__init__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__le__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__lt__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ne__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__new__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__or__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.__ror__": true,
+ "google.generativeai.protos.UpdatePermissionRequest.copy_from": true,
+ "google.generativeai.protos.UpdatePermissionRequest.deserialize": true,
+ "google.generativeai.protos.UpdatePermissionRequest.from_json": true,
+ "google.generativeai.protos.UpdatePermissionRequest.mro": true,
+ "google.generativeai.protos.UpdatePermissionRequest.pb": true,
+ "google.generativeai.protos.UpdatePermissionRequest.permission": true,
+ "google.generativeai.protos.UpdatePermissionRequest.serialize": true,
+ "google.generativeai.protos.UpdatePermissionRequest.to_dict": true,
+ "google.generativeai.protos.UpdatePermissionRequest.to_json": true,
+ "google.generativeai.protos.UpdatePermissionRequest.update_mask": true,
+ "google.generativeai.protos.UpdatePermissionRequest.wrap": true,
+ "google.generativeai.protos.UpdateTunedModelRequest": false,
+ "google.generativeai.protos.UpdateTunedModelRequest.__call__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__eq__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ge__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__gt__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__init__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__le__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__lt__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ne__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__new__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__or__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.__ror__": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.copy_from": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.deserialize": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.from_json": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.mro": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.pb": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.serialize": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.to_dict": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.to_json": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.tuned_model": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.update_mask": true,
+ "google.generativeai.protos.UpdateTunedModelRequest.wrap": true,
+ "google.generativeai.protos.VideoMetadata": false,
+ "google.generativeai.protos.VideoMetadata.__call__": true,
+ "google.generativeai.protos.VideoMetadata.__eq__": true,
+ "google.generativeai.protos.VideoMetadata.__ge__": true,
+ "google.generativeai.protos.VideoMetadata.__gt__": true,
+ "google.generativeai.protos.VideoMetadata.__init__": true,
+ "google.generativeai.protos.VideoMetadata.__le__": true,
+ "google.generativeai.protos.VideoMetadata.__lt__": true,
+ "google.generativeai.protos.VideoMetadata.__ne__": true,
+ "google.generativeai.protos.VideoMetadata.__new__": true,
+ "google.generativeai.protos.VideoMetadata.__or__": true,
+ "google.generativeai.protos.VideoMetadata.__ror__": true,
+ "google.generativeai.protos.VideoMetadata.copy_from": true,
+ "google.generativeai.protos.VideoMetadata.deserialize": true,
+ "google.generativeai.protos.VideoMetadata.from_json": true,
+ "google.generativeai.protos.VideoMetadata.mro": true,
+ "google.generativeai.protos.VideoMetadata.pb": true,
+ "google.generativeai.protos.VideoMetadata.serialize": true,
+ "google.generativeai.protos.VideoMetadata.to_dict": true,
+ "google.generativeai.protos.VideoMetadata.to_json": true,
+ "google.generativeai.protos.VideoMetadata.video_duration": true,
+ "google.generativeai.protos.VideoMetadata.wrap": true,
+ "google.generativeai.types": false,
+ "google.generativeai.types.AnyModelNameOptions": false,
+ "google.generativeai.types.AsyncGenerateContentResponse": false,
+ "google.generativeai.types.AsyncGenerateContentResponse.__eq__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__ge__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__gt__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__init__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__le__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__lt__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__ne__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.__new__": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.candidates": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.from_response": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.parts": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.resolve": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.text": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.to_dict": true,
+ "google.generativeai.types.AsyncGenerateContentResponse.usage_metadata": true,
+ "google.generativeai.types.BaseModelNameOptions": false,
+ "google.generativeai.types.BlobDict": false,
+ "google.generativeai.types.BlobDict.__contains__": true,
+ "google.generativeai.types.BlobDict.__eq__": true,
+ "google.generativeai.types.BlobDict.__ge__": true,
+ "google.generativeai.types.BlobDict.__getitem__": true,
+ "google.generativeai.types.BlobDict.__gt__": true,
+ "google.generativeai.types.BlobDict.__init__": true,
+ "google.generativeai.types.BlobDict.__iter__": true,
+ "google.generativeai.types.BlobDict.__le__": true,
+ "google.generativeai.types.BlobDict.__len__": true,
+ "google.generativeai.types.BlobDict.__lt__": true,
+ "google.generativeai.types.BlobDict.__ne__": true,
+ "google.generativeai.types.BlobDict.__new__": true,
+ "google.generativeai.types.BlobDict.__or__": true,
+ "google.generativeai.types.BlobDict.__ror__": true,
+ "google.generativeai.types.BlobDict.clear": true,
+ "google.generativeai.types.BlobDict.copy": true,
+ "google.generativeai.types.BlobDict.fromkeys": true,
+ "google.generativeai.types.BlobDict.get": true,
+ "google.generativeai.types.BlobDict.items": true,
+ "google.generativeai.types.BlobDict.keys": true,
+ "google.generativeai.types.BlobDict.pop": true,
+ "google.generativeai.types.BlobDict.popitem": true,
+ "google.generativeai.types.BlobDict.setdefault": true,
+ "google.generativeai.types.BlobDict.update": true,
+ "google.generativeai.types.BlobDict.values": true,
+ "google.generativeai.types.BlobType": false,
+ "google.generativeai.types.BlockedPromptException": false,
+ "google.generativeai.types.BlockedPromptException.__eq__": true,
+ "google.generativeai.types.BlockedPromptException.__ge__": true,
+ "google.generativeai.types.BlockedPromptException.__gt__": true,
+ "google.generativeai.types.BlockedPromptException.__init__": true,
+ "google.generativeai.types.BlockedPromptException.__le__": true,
+ "google.generativeai.types.BlockedPromptException.__lt__": true,
+ "google.generativeai.types.BlockedPromptException.__ne__": true,
+ "google.generativeai.types.BlockedPromptException.__new__": true,
+ "google.generativeai.types.BlockedPromptException.add_note": true,
+ "google.generativeai.types.BlockedPromptException.args": true,
+ "google.generativeai.types.BlockedPromptException.with_traceback": true,
+ "google.generativeai.types.BlockedReason": false,
+ "google.generativeai.types.BlockedReason.BLOCKED_REASON_UNSPECIFIED": true,
+ "google.generativeai.types.BlockedReason.OTHER": true,
+ "google.generativeai.types.BlockedReason.SAFETY": true,
+ "google.generativeai.types.BlockedReason.__abs__": true,
+ "google.generativeai.types.BlockedReason.__add__": true,
+ "google.generativeai.types.BlockedReason.__and__": true,
+ "google.generativeai.types.BlockedReason.__bool__": true,
+ "google.generativeai.types.BlockedReason.__contains__": true,
+ "google.generativeai.types.BlockedReason.__eq__": true,
+ "google.generativeai.types.BlockedReason.__floordiv__": true,
+ "google.generativeai.types.BlockedReason.__ge__": true,
+ "google.generativeai.types.BlockedReason.__getitem__": true,
+ "google.generativeai.types.BlockedReason.__gt__": true,
+ "google.generativeai.types.BlockedReason.__init__": true,
+ "google.generativeai.types.BlockedReason.__invert__": true,
+ "google.generativeai.types.BlockedReason.__iter__": true,
+ "google.generativeai.types.BlockedReason.__le__": true,
+ "google.generativeai.types.BlockedReason.__len__": true,
+ "google.generativeai.types.BlockedReason.__lshift__": true,
+ "google.generativeai.types.BlockedReason.__lt__": true,
+ "google.generativeai.types.BlockedReason.__mod__": true,
+ "google.generativeai.types.BlockedReason.__mul__": true,
+ "google.generativeai.types.BlockedReason.__ne__": true,
+ "google.generativeai.types.BlockedReason.__neg__": true,
+ "google.generativeai.types.BlockedReason.__new__": true,
+ "google.generativeai.types.BlockedReason.__or__": true,
+ "google.generativeai.types.BlockedReason.__pos__": true,
+ "google.generativeai.types.BlockedReason.__pow__": true,
+ "google.generativeai.types.BlockedReason.__radd__": true,
+ "google.generativeai.types.BlockedReason.__rand__": true,
+ "google.generativeai.types.BlockedReason.__rfloordiv__": true,
+ "google.generativeai.types.BlockedReason.__rlshift__": true,
+ "google.generativeai.types.BlockedReason.__rmod__": true,
+ "google.generativeai.types.BlockedReason.__rmul__": true,
+ "google.generativeai.types.BlockedReason.__ror__": true,
+ "google.generativeai.types.BlockedReason.__rpow__": true,
+ "google.generativeai.types.BlockedReason.__rrshift__": true,
+ "google.generativeai.types.BlockedReason.__rshift__": true,
+ "google.generativeai.types.BlockedReason.__rsub__": true,
+ "google.generativeai.types.BlockedReason.__rtruediv__": true,
+ "google.generativeai.types.BlockedReason.__rxor__": true,
+ "google.generativeai.types.BlockedReason.__sub__": true,
+ "google.generativeai.types.BlockedReason.__truediv__": true,
+ "google.generativeai.types.BlockedReason.__xor__": true,
+ "google.generativeai.types.BlockedReason.as_integer_ratio": true,
+ "google.generativeai.types.BlockedReason.bit_count": true,
+ "google.generativeai.types.BlockedReason.bit_length": true,
+ "google.generativeai.types.BlockedReason.conjugate": true,
+ "google.generativeai.types.BlockedReason.denominator": true,
+ "google.generativeai.types.BlockedReason.from_bytes": true,
+ "google.generativeai.types.BlockedReason.imag": true,
+ "google.generativeai.types.BlockedReason.is_integer": true,
+ "google.generativeai.types.BlockedReason.numerator": true,
+ "google.generativeai.types.BlockedReason.real": true,
+ "google.generativeai.types.BlockedReason.to_bytes": true,
+ "google.generativeai.types.BrokenResponseError": false,
+ "google.generativeai.types.BrokenResponseError.__eq__": true,
+ "google.generativeai.types.BrokenResponseError.__ge__": true,
+ "google.generativeai.types.BrokenResponseError.__gt__": true,
+ "google.generativeai.types.BrokenResponseError.__init__": true,
+ "google.generativeai.types.BrokenResponseError.__le__": true,
+ "google.generativeai.types.BrokenResponseError.__lt__": true,
+ "google.generativeai.types.BrokenResponseError.__ne__": true,
+ "google.generativeai.types.BrokenResponseError.__new__": true,
+ "google.generativeai.types.BrokenResponseError.add_note": true,
+ "google.generativeai.types.BrokenResponseError.args": true,
+ "google.generativeai.types.BrokenResponseError.with_traceback": true,
+ "google.generativeai.types.CallableFunctionDeclaration": false,
+ "google.generativeai.types.CallableFunctionDeclaration.__call__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__eq__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__ge__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__gt__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__init__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__le__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__lt__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__ne__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.__new__": true,
+ "google.generativeai.types.CallableFunctionDeclaration.description": true,
+ "google.generativeai.types.CallableFunctionDeclaration.from_function": true,
+ "google.generativeai.types.CallableFunctionDeclaration.from_proto": true,
+ "google.generativeai.types.CallableFunctionDeclaration.name": true,
+ "google.generativeai.types.CallableFunctionDeclaration.parameters": true,
+ "google.generativeai.types.CallableFunctionDeclaration.to_proto": true,
+ "google.generativeai.types.CitationMetadataDict": false,
+ "google.generativeai.types.CitationMetadataDict.__contains__": true,
+ "google.generativeai.types.CitationMetadataDict.__eq__": true,
+ "google.generativeai.types.CitationMetadataDict.__ge__": true,
+ "google.generativeai.types.CitationMetadataDict.__getitem__": true,
+ "google.generativeai.types.CitationMetadataDict.__gt__": true,
+ "google.generativeai.types.CitationMetadataDict.__init__": true,
+ "google.generativeai.types.CitationMetadataDict.__iter__": true,
+ "google.generativeai.types.CitationMetadataDict.__le__": true,
+ "google.generativeai.types.CitationMetadataDict.__len__": true,
+ "google.generativeai.types.CitationMetadataDict.__lt__": true,
+ "google.generativeai.types.CitationMetadataDict.__ne__": true,
+ "google.generativeai.types.CitationMetadataDict.__new__": true,
+ "google.generativeai.types.CitationMetadataDict.__or__": true,
+ "google.generativeai.types.CitationMetadataDict.__ror__": true,
+ "google.generativeai.types.CitationMetadataDict.clear": true,
+ "google.generativeai.types.CitationMetadataDict.copy": true,
+ "google.generativeai.types.CitationMetadataDict.fromkeys": true,
+ "google.generativeai.types.CitationMetadataDict.get": true,
+ "google.generativeai.types.CitationMetadataDict.items": true,
+ "google.generativeai.types.CitationMetadataDict.keys": true,
+ "google.generativeai.types.CitationMetadataDict.pop": true,
+ "google.generativeai.types.CitationMetadataDict.popitem": true,
+ "google.generativeai.types.CitationMetadataDict.setdefault": true,
+ "google.generativeai.types.CitationMetadataDict.update": true,
+ "google.generativeai.types.CitationMetadataDict.values": true,
+ "google.generativeai.types.CitationSourceDict": false,
+ "google.generativeai.types.CitationSourceDict.__contains__": true,
+ "google.generativeai.types.CitationSourceDict.__eq__": true,
+ "google.generativeai.types.CitationSourceDict.__ge__": true,
+ "google.generativeai.types.CitationSourceDict.__getitem__": true,
+ "google.generativeai.types.CitationSourceDict.__gt__": true,
+ "google.generativeai.types.CitationSourceDict.__init__": true,
+ "google.generativeai.types.CitationSourceDict.__iter__": true,
+ "google.generativeai.types.CitationSourceDict.__le__": true,
+ "google.generativeai.types.CitationSourceDict.__len__": true,
+ "google.generativeai.types.CitationSourceDict.__lt__": true,
+ "google.generativeai.types.CitationSourceDict.__ne__": true,
+ "google.generativeai.types.CitationSourceDict.__new__": true,
+ "google.generativeai.types.CitationSourceDict.__or__": true,
+ "google.generativeai.types.CitationSourceDict.__ror__": true,
+ "google.generativeai.types.CitationSourceDict.clear": true,
+ "google.generativeai.types.CitationSourceDict.copy": true,
+ "google.generativeai.types.CitationSourceDict.fromkeys": true,
+ "google.generativeai.types.CitationSourceDict.get": true,
+ "google.generativeai.types.CitationSourceDict.items": true,
+ "google.generativeai.types.CitationSourceDict.keys": true,
+ "google.generativeai.types.CitationSourceDict.pop": true,
+ "google.generativeai.types.CitationSourceDict.popitem": true,
+ "google.generativeai.types.CitationSourceDict.setdefault": true,
+ "google.generativeai.types.CitationSourceDict.update": true,
+ "google.generativeai.types.CitationSourceDict.values": true,
+ "google.generativeai.types.ContentDict": false,
+ "google.generativeai.types.ContentDict.__contains__": true,
+ "google.generativeai.types.ContentDict.__eq__": true,
+ "google.generativeai.types.ContentDict.__ge__": true,
+ "google.generativeai.types.ContentDict.__getitem__": true,
+ "google.generativeai.types.ContentDict.__gt__": true,
+ "google.generativeai.types.ContentDict.__init__": true,
+ "google.generativeai.types.ContentDict.__iter__": true,
+ "google.generativeai.types.ContentDict.__le__": true,
+ "google.generativeai.types.ContentDict.__len__": true,
+ "google.generativeai.types.ContentDict.__lt__": true,
+ "google.generativeai.types.ContentDict.__ne__": true,
+ "google.generativeai.types.ContentDict.__new__": true,
+ "google.generativeai.types.ContentDict.__or__": true,
+ "google.generativeai.types.ContentDict.__ror__": true,
+ "google.generativeai.types.ContentDict.clear": true,
+ "google.generativeai.types.ContentDict.copy": true,
+ "google.generativeai.types.ContentDict.fromkeys": true,
+ "google.generativeai.types.ContentDict.get": true,
+ "google.generativeai.types.ContentDict.items": true,
+ "google.generativeai.types.ContentDict.keys": true,
+ "google.generativeai.types.ContentDict.pop": true,
+ "google.generativeai.types.ContentDict.popitem": true,
+ "google.generativeai.types.ContentDict.setdefault": true,
+ "google.generativeai.types.ContentDict.update": true,
+ "google.generativeai.types.ContentDict.values": true,
+ "google.generativeai.types.ContentFilterDict": false,
+ "google.generativeai.types.ContentFilterDict.__contains__": true,
+ "google.generativeai.types.ContentFilterDict.__eq__": true,
+ "google.generativeai.types.ContentFilterDict.__ge__": true,
+ "google.generativeai.types.ContentFilterDict.__getitem__": true,
+ "google.generativeai.types.ContentFilterDict.__gt__": true,
+ "google.generativeai.types.ContentFilterDict.__init__": true,
+ "google.generativeai.types.ContentFilterDict.__iter__": true,
+ "google.generativeai.types.ContentFilterDict.__le__": true,
+ "google.generativeai.types.ContentFilterDict.__len__": true,
+ "google.generativeai.types.ContentFilterDict.__lt__": true,
+ "google.generativeai.types.ContentFilterDict.__ne__": true,
+ "google.generativeai.types.ContentFilterDict.__new__": true,
+ "google.generativeai.types.ContentFilterDict.__or__": true,
+ "google.generativeai.types.ContentFilterDict.__ror__": true,
+ "google.generativeai.types.ContentFilterDict.clear": true,
+ "google.generativeai.types.ContentFilterDict.copy": true,
+ "google.generativeai.types.ContentFilterDict.fromkeys": true,
+ "google.generativeai.types.ContentFilterDict.get": true,
+ "google.generativeai.types.ContentFilterDict.items": true,
+ "google.generativeai.types.ContentFilterDict.keys": true,
+ "google.generativeai.types.ContentFilterDict.pop": true,
+ "google.generativeai.types.ContentFilterDict.popitem": true,
+ "google.generativeai.types.ContentFilterDict.setdefault": true,
+ "google.generativeai.types.ContentFilterDict.update": true,
+ "google.generativeai.types.ContentFilterDict.values": true,
+ "google.generativeai.types.ContentType": false,
+ "google.generativeai.types.ContentsType": false,
+ "google.generativeai.types.File": false,
+ "google.generativeai.types.File.__eq__": true,
+ "google.generativeai.types.File.__ge__": true,
+ "google.generativeai.types.File.__gt__": true,
+ "google.generativeai.types.File.__init__": true,
+ "google.generativeai.types.File.__le__": true,
+ "google.generativeai.types.File.__lt__": true,
+ "google.generativeai.types.File.__ne__": true,
+ "google.generativeai.types.File.__new__": true,
+ "google.generativeai.types.File.create_time": true,
+ "google.generativeai.types.File.delete": true,
+ "google.generativeai.types.File.display_name": true,
+ "google.generativeai.types.File.error": true,
+ "google.generativeai.types.File.expiration_time": true,
+ "google.generativeai.types.File.mime_type": true,
+ "google.generativeai.types.File.name": true,
+ "google.generativeai.types.File.sha256_hash": true,
+ "google.generativeai.types.File.size_bytes": true,
+ "google.generativeai.types.File.state": true,
+ "google.generativeai.types.File.to_dict": true,
+ "google.generativeai.types.File.to_proto": true,
+ "google.generativeai.types.File.update_time": true,
+ "google.generativeai.types.File.uri": true,
+ "google.generativeai.types.File.video_metadata": true,
+ "google.generativeai.types.FileDataDict": false,
+ "google.generativeai.types.FileDataDict.__contains__": true,
+ "google.generativeai.types.FileDataDict.__eq__": true,
+ "google.generativeai.types.FileDataDict.__ge__": true,
+ "google.generativeai.types.FileDataDict.__getitem__": true,
+ "google.generativeai.types.FileDataDict.__gt__": true,
+ "google.generativeai.types.FileDataDict.__init__": true,
+ "google.generativeai.types.FileDataDict.__iter__": true,
+ "google.generativeai.types.FileDataDict.__le__": true,
+ "google.generativeai.types.FileDataDict.__len__": true,
+ "google.generativeai.types.FileDataDict.__lt__": true,
+ "google.generativeai.types.FileDataDict.__ne__": true,
+ "google.generativeai.types.FileDataDict.__new__": true,
+ "google.generativeai.types.FileDataDict.__or__": true,
+ "google.generativeai.types.FileDataDict.__ror__": true,
+ "google.generativeai.types.FileDataDict.clear": true,
+ "google.generativeai.types.FileDataDict.copy": true,
+ "google.generativeai.types.FileDataDict.fromkeys": true,
+ "google.generativeai.types.FileDataDict.get": true,
+ "google.generativeai.types.FileDataDict.items": true,
+ "google.generativeai.types.FileDataDict.keys": true,
+ "google.generativeai.types.FileDataDict.pop": true,
+ "google.generativeai.types.FileDataDict.popitem": true,
+ "google.generativeai.types.FileDataDict.setdefault": true,
+ "google.generativeai.types.FileDataDict.update": true,
+ "google.generativeai.types.FileDataDict.values": true,
+ "google.generativeai.types.FileDataType": false,
+ "google.generativeai.types.FunctionDeclaration": false,
+ "google.generativeai.types.FunctionDeclaration.__eq__": true,
+ "google.generativeai.types.FunctionDeclaration.__ge__": true,
+ "google.generativeai.types.FunctionDeclaration.__gt__": true,
+ "google.generativeai.types.FunctionDeclaration.__init__": true,
+ "google.generativeai.types.FunctionDeclaration.__le__": true,
+ "google.generativeai.types.FunctionDeclaration.__lt__": true,
+ "google.generativeai.types.FunctionDeclaration.__ne__": true,
+ "google.generativeai.types.FunctionDeclaration.__new__": true,
+ "google.generativeai.types.FunctionDeclaration.description": true,
+ "google.generativeai.types.FunctionDeclaration.from_function": true,
+ "google.generativeai.types.FunctionDeclaration.from_proto": true,
+ "google.generativeai.types.FunctionDeclaration.name": true,
+ "google.generativeai.types.FunctionDeclaration.parameters": true,
+ "google.generativeai.types.FunctionDeclaration.to_proto": true,
+ "google.generativeai.types.FunctionDeclarationType": false,
+ "google.generativeai.types.FunctionLibrary": false,
+ "google.generativeai.types.FunctionLibrary.__call__": true,
+ "google.generativeai.types.FunctionLibrary.__eq__": true,
+ "google.generativeai.types.FunctionLibrary.__ge__": true,
+ "google.generativeai.types.FunctionLibrary.__getitem__": true,
+ "google.generativeai.types.FunctionLibrary.__gt__": true,
+ "google.generativeai.types.FunctionLibrary.__init__": true,
+ "google.generativeai.types.FunctionLibrary.__le__": true,
+ "google.generativeai.types.FunctionLibrary.__lt__": true,
+ "google.generativeai.types.FunctionLibrary.__ne__": true,
+ "google.generativeai.types.FunctionLibrary.__new__": true,
+ "google.generativeai.types.FunctionLibrary.to_proto": true,
+ "google.generativeai.types.FunctionLibraryType": false,
+ "google.generativeai.types.GenerateContentResponse": false,
+ "google.generativeai.types.GenerateContentResponse.__eq__": true,
+ "google.generativeai.types.GenerateContentResponse.__ge__": true,
+ "google.generativeai.types.GenerateContentResponse.__gt__": true,
+ "google.generativeai.types.GenerateContentResponse.__init__": true,
+ "google.generativeai.types.GenerateContentResponse.__iter__": true,
+ "google.generativeai.types.GenerateContentResponse.__le__": true,
+ "google.generativeai.types.GenerateContentResponse.__lt__": true,
+ "google.generativeai.types.GenerateContentResponse.__ne__": true,
+ "google.generativeai.types.GenerateContentResponse.__new__": true,
+ "google.generativeai.types.GenerateContentResponse.candidates": true,
+ "google.generativeai.types.GenerateContentResponse.from_iterator": true,
+ "google.generativeai.types.GenerateContentResponse.from_response": true,
+ "google.generativeai.types.GenerateContentResponse.parts": true,
+ "google.generativeai.types.GenerateContentResponse.prompt_feedback": true,
+ "google.generativeai.types.GenerateContentResponse.resolve": true,
+ "google.generativeai.types.GenerateContentResponse.text": true,
+ "google.generativeai.types.GenerateContentResponse.to_dict": true,
+ "google.generativeai.types.GenerateContentResponse.usage_metadata": true,
+ "google.generativeai.types.GenerationConfig": false,
+ "google.generativeai.types.GenerationConfig.__eq__": true,
+ "google.generativeai.types.GenerationConfig.__ge__": true,
+ "google.generativeai.types.GenerationConfig.__gt__": true,
+ "google.generativeai.types.GenerationConfig.__init__": true,
+ "google.generativeai.types.GenerationConfig.__le__": true,
+ "google.generativeai.types.GenerationConfig.__lt__": true,
+ "google.generativeai.types.GenerationConfig.__ne__": true,
+ "google.generativeai.types.GenerationConfig.__new__": true,
+ "google.generativeai.types.GenerationConfig.candidate_count": true,
+ "google.generativeai.types.GenerationConfig.frequency_penalty": true,
+ "google.generativeai.types.GenerationConfig.logprobs": true,
+ "google.generativeai.types.GenerationConfig.max_output_tokens": true,
+ "google.generativeai.types.GenerationConfig.presence_penalty": true,
+ "google.generativeai.types.GenerationConfig.response_logprobs": true,
+ "google.generativeai.types.GenerationConfig.response_mime_type": true,
+ "google.generativeai.types.GenerationConfig.response_schema": true,
+ "google.generativeai.types.GenerationConfig.seed": true,
+ "google.generativeai.types.GenerationConfig.stop_sequences": true,
+ "google.generativeai.types.GenerationConfig.temperature": true,
+ "google.generativeai.types.GenerationConfig.top_k": true,
+ "google.generativeai.types.GenerationConfig.top_p": true,
+ "google.generativeai.types.GenerationConfigDict": false,
+ "google.generativeai.types.GenerationConfigDict.__contains__": true,
+ "google.generativeai.types.GenerationConfigDict.__eq__": true,
+ "google.generativeai.types.GenerationConfigDict.__ge__": true,
+ "google.generativeai.types.GenerationConfigDict.__getitem__": true,
+ "google.generativeai.types.GenerationConfigDict.__gt__": true,
+ "google.generativeai.types.GenerationConfigDict.__init__": true,
+ "google.generativeai.types.GenerationConfigDict.__iter__": true,
+ "google.generativeai.types.GenerationConfigDict.__le__": true,
+ "google.generativeai.types.GenerationConfigDict.__len__": true,
+ "google.generativeai.types.GenerationConfigDict.__lt__": true,
+ "google.generativeai.types.GenerationConfigDict.__ne__": true,
+ "google.generativeai.types.GenerationConfigDict.__new__": true,
+ "google.generativeai.types.GenerationConfigDict.__or__": true,
+ "google.generativeai.types.GenerationConfigDict.__ror__": true,
+ "google.generativeai.types.GenerationConfigDict.clear": true,
+ "google.generativeai.types.GenerationConfigDict.copy": true,
+ "google.generativeai.types.GenerationConfigDict.fromkeys": true,
+ "google.generativeai.types.GenerationConfigDict.get": true,
+ "google.generativeai.types.GenerationConfigDict.items": true,
+ "google.generativeai.types.GenerationConfigDict.keys": true,
+ "google.generativeai.types.GenerationConfigDict.pop": true,
+ "google.generativeai.types.GenerationConfigDict.popitem": true,
+ "google.generativeai.types.GenerationConfigDict.setdefault": true,
+ "google.generativeai.types.GenerationConfigDict.update": true,
+ "google.generativeai.types.GenerationConfigDict.values": true,
+ "google.generativeai.types.GenerationConfigType": false,
+ "google.generativeai.types.HarmBlockThreshold": false,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_NONE": true,
+ "google.generativeai.types.HarmBlockThreshold.BLOCK_ONLY_HIGH": true,
+ "google.generativeai.types.HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED": true,
+ "google.generativeai.types.HarmBlockThreshold.OFF": true,
+ "google.generativeai.types.HarmBlockThreshold.__abs__": true,
+ "google.generativeai.types.HarmBlockThreshold.__add__": true,
+ "google.generativeai.types.HarmBlockThreshold.__and__": true,
+ "google.generativeai.types.HarmBlockThreshold.__bool__": true,
+ "google.generativeai.types.HarmBlockThreshold.__contains__": true,
+ "google.generativeai.types.HarmBlockThreshold.__eq__": true,
+ "google.generativeai.types.HarmBlockThreshold.__floordiv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ge__": true,
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": true,
+ "google.generativeai.types.HarmBlockThreshold.__gt__": true,
+ "google.generativeai.types.HarmBlockThreshold.__init__": true,
+ "google.generativeai.types.HarmBlockThreshold.__invert__": true,
+ "google.generativeai.types.HarmBlockThreshold.__iter__": true,
+ "google.generativeai.types.HarmBlockThreshold.__le__": true,
+ "google.generativeai.types.HarmBlockThreshold.__len__": true,
+ "google.generativeai.types.HarmBlockThreshold.__lshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__lt__": true,
+ "google.generativeai.types.HarmBlockThreshold.__mod__": true,
+ "google.generativeai.types.HarmBlockThreshold.__mul__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ne__": true,
+ "google.generativeai.types.HarmBlockThreshold.__neg__": true,
+ "google.generativeai.types.HarmBlockThreshold.__new__": true,
+ "google.generativeai.types.HarmBlockThreshold.__or__": true,
+ "google.generativeai.types.HarmBlockThreshold.__pos__": true,
+ "google.generativeai.types.HarmBlockThreshold.__pow__": true,
+ "google.generativeai.types.HarmBlockThreshold.__radd__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rand__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rfloordiv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rlshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rmod__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rmul__": true,
+ "google.generativeai.types.HarmBlockThreshold.__ror__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rpow__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rrshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rshift__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rsub__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rtruediv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__rxor__": true,
+ "google.generativeai.types.HarmBlockThreshold.__sub__": true,
+ "google.generativeai.types.HarmBlockThreshold.__truediv__": true,
+ "google.generativeai.types.HarmBlockThreshold.__xor__": true,
+ "google.generativeai.types.HarmBlockThreshold.as_integer_ratio": true,
+ "google.generativeai.types.HarmBlockThreshold.bit_count": true,
+ "google.generativeai.types.HarmBlockThreshold.bit_length": true,
+ "google.generativeai.types.HarmBlockThreshold.conjugate": true,
+ "google.generativeai.types.HarmBlockThreshold.denominator": true,
+ "google.generativeai.types.HarmBlockThreshold.from_bytes": true,
+ "google.generativeai.types.HarmBlockThreshold.imag": true,
+ "google.generativeai.types.HarmBlockThreshold.is_integer": true,
+ "google.generativeai.types.HarmBlockThreshold.numerator": true,
+ "google.generativeai.types.HarmBlockThreshold.real": true,
+ "google.generativeai.types.HarmBlockThreshold.to_bytes": true,
+ "google.generativeai.types.HarmCategory": false,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_HARASSMENT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT": true,
+ "google.generativeai.types.HarmCategory.HARM_CATEGORY_UNSPECIFIED": true,
+ "google.generativeai.types.HarmCategory.__abs__": true,
+ "google.generativeai.types.HarmCategory.__add__": true,
+ "google.generativeai.types.HarmCategory.__and__": true,
+ "google.generativeai.types.HarmCategory.__bool__": true,
+ "google.generativeai.types.HarmCategory.__contains__": true,
+ "google.generativeai.types.HarmCategory.__eq__": true,
+ "google.generativeai.types.HarmCategory.__floordiv__": true,
+ "google.generativeai.types.HarmCategory.__ge__": true,
+ "google.generativeai.types.HarmCategory.__getitem__": true,
+ "google.generativeai.types.HarmCategory.__gt__": true,
+ "google.generativeai.types.HarmCategory.__init__": true,
+ "google.generativeai.types.HarmCategory.__invert__": true,
+ "google.generativeai.types.HarmCategory.__iter__": true,
+ "google.generativeai.types.HarmCategory.__le__": true,
+ "google.generativeai.types.HarmCategory.__len__": true,
+ "google.generativeai.types.HarmCategory.__lshift__": true,
+ "google.generativeai.types.HarmCategory.__lt__": true,
+ "google.generativeai.types.HarmCategory.__mod__": true,
+ "google.generativeai.types.HarmCategory.__mul__": true,
+ "google.generativeai.types.HarmCategory.__ne__": true,
+ "google.generativeai.types.HarmCategory.__neg__": true,
+ "google.generativeai.types.HarmCategory.__new__": true,
+ "google.generativeai.types.HarmCategory.__or__": true,
+ "google.generativeai.types.HarmCategory.__pos__": true,
+ "google.generativeai.types.HarmCategory.__pow__": true,
+ "google.generativeai.types.HarmCategory.__radd__": true,
+ "google.generativeai.types.HarmCategory.__rand__": true,
+ "google.generativeai.types.HarmCategory.__rfloordiv__": true,
+ "google.generativeai.types.HarmCategory.__rlshift__": true,
+ "google.generativeai.types.HarmCategory.__rmod__": true,
+ "google.generativeai.types.HarmCategory.__rmul__": true,
+ "google.generativeai.types.HarmCategory.__ror__": true,
+ "google.generativeai.types.HarmCategory.__rpow__": true,
+ "google.generativeai.types.HarmCategory.__rrshift__": true,
+ "google.generativeai.types.HarmCategory.__rshift__": true,
+ "google.generativeai.types.HarmCategory.__rsub__": true,
+ "google.generativeai.types.HarmCategory.__rtruediv__": true,
+ "google.generativeai.types.HarmCategory.__rxor__": true,
+ "google.generativeai.types.HarmCategory.__sub__": true,
+ "google.generativeai.types.HarmCategory.__truediv__": true,
+ "google.generativeai.types.HarmCategory.__xor__": true,
+ "google.generativeai.types.HarmCategory.as_integer_ratio": true,
+ "google.generativeai.types.HarmCategory.bit_count": true,
+ "google.generativeai.types.HarmCategory.bit_length": true,
+ "google.generativeai.types.HarmCategory.conjugate": true,
+ "google.generativeai.types.HarmCategory.denominator": true,
+ "google.generativeai.types.HarmCategory.from_bytes": true,
+ "google.generativeai.types.HarmCategory.imag": true,
+ "google.generativeai.types.HarmCategory.is_integer": true,
+ "google.generativeai.types.HarmCategory.numerator": true,
+ "google.generativeai.types.HarmCategory.real": true,
+ "google.generativeai.types.HarmCategory.to_bytes": true,
+ "google.generativeai.types.HarmProbability": false,
+ "google.generativeai.types.HarmProbability.HARM_PROBABILITY_UNSPECIFIED": true,
+ "google.generativeai.types.HarmProbability.HIGH": true,
+ "google.generativeai.types.HarmProbability.LOW": true,
+ "google.generativeai.types.HarmProbability.MEDIUM": true,
+ "google.generativeai.types.HarmProbability.NEGLIGIBLE": true,
+ "google.generativeai.types.HarmProbability.__abs__": true,
+ "google.generativeai.types.HarmProbability.__add__": true,
+ "google.generativeai.types.HarmProbability.__and__": true,
+ "google.generativeai.types.HarmProbability.__bool__": true,
+ "google.generativeai.types.HarmProbability.__contains__": true,
+ "google.generativeai.types.HarmProbability.__eq__": true,
+ "google.generativeai.types.HarmProbability.__floordiv__": true,
+ "google.generativeai.types.HarmProbability.__ge__": true,
+ "google.generativeai.types.HarmProbability.__getitem__": true,
+ "google.generativeai.types.HarmProbability.__gt__": true,
+ "google.generativeai.types.HarmProbability.__init__": true,
+ "google.generativeai.types.HarmProbability.__invert__": true,
+ "google.generativeai.types.HarmProbability.__iter__": true,
+ "google.generativeai.types.HarmProbability.__le__": true,
+ "google.generativeai.types.HarmProbability.__len__": true,
+ "google.generativeai.types.HarmProbability.__lshift__": true,
+ "google.generativeai.types.HarmProbability.__lt__": true,
+ "google.generativeai.types.HarmProbability.__mod__": true,
+ "google.generativeai.types.HarmProbability.__mul__": true,
+ "google.generativeai.types.HarmProbability.__ne__": true,
+ "google.generativeai.types.HarmProbability.__neg__": true,
+ "google.generativeai.types.HarmProbability.__new__": true,
+ "google.generativeai.types.HarmProbability.__or__": true,
+ "google.generativeai.types.HarmProbability.__pos__": true,
+ "google.generativeai.types.HarmProbability.__pow__": true,
+ "google.generativeai.types.HarmProbability.__radd__": true,
+ "google.generativeai.types.HarmProbability.__rand__": true,
+ "google.generativeai.types.HarmProbability.__rfloordiv__": true,
+ "google.generativeai.types.HarmProbability.__rlshift__": true,
+ "google.generativeai.types.HarmProbability.__rmod__": true,
+ "google.generativeai.types.HarmProbability.__rmul__": true,
+ "google.generativeai.types.HarmProbability.__ror__": true,
+ "google.generativeai.types.HarmProbability.__rpow__": true,
+ "google.generativeai.types.HarmProbability.__rrshift__": true,
+ "google.generativeai.types.HarmProbability.__rshift__": true,
+ "google.generativeai.types.HarmProbability.__rsub__": true,
+ "google.generativeai.types.HarmProbability.__rtruediv__": true,
+ "google.generativeai.types.HarmProbability.__rxor__": true,
+ "google.generativeai.types.HarmProbability.__sub__": true,
+ "google.generativeai.types.HarmProbability.__truediv__": true,
+ "google.generativeai.types.HarmProbability.__xor__": true,
+ "google.generativeai.types.HarmProbability.as_integer_ratio": true,
+ "google.generativeai.types.HarmProbability.bit_count": true,
+ "google.generativeai.types.HarmProbability.bit_length": true,
+ "google.generativeai.types.HarmProbability.conjugate": true,
+ "google.generativeai.types.HarmProbability.denominator": true,
+ "google.generativeai.types.HarmProbability.from_bytes": true,
+ "google.generativeai.types.HarmProbability.imag": true,
+ "google.generativeai.types.HarmProbability.is_integer": true,
+ "google.generativeai.types.HarmProbability.numerator": true,
+ "google.generativeai.types.HarmProbability.real": true,
+ "google.generativeai.types.HarmProbability.to_bytes": true,
+ "google.generativeai.types.IncompleteIterationError": false,
+ "google.generativeai.types.IncompleteIterationError.__eq__": true,
+ "google.generativeai.types.IncompleteIterationError.__ge__": true,
+ "google.generativeai.types.IncompleteIterationError.__gt__": true,
+ "google.generativeai.types.IncompleteIterationError.__init__": true,
+ "google.generativeai.types.IncompleteIterationError.__le__": true,
+ "google.generativeai.types.IncompleteIterationError.__lt__": true,
+ "google.generativeai.types.IncompleteIterationError.__ne__": true,
+ "google.generativeai.types.IncompleteIterationError.__new__": true,
+ "google.generativeai.types.IncompleteIterationError.add_note": true,
+ "google.generativeai.types.IncompleteIterationError.args": true,
+ "google.generativeai.types.IncompleteIterationError.with_traceback": true,
+ "google.generativeai.types.Model": false,
+ "google.generativeai.types.Model.__eq__": true,
+ "google.generativeai.types.Model.__ge__": true,
+ "google.generativeai.types.Model.__gt__": true,
+ "google.generativeai.types.Model.__init__": true,
+ "google.generativeai.types.Model.__le__": true,
+ "google.generativeai.types.Model.__lt__": true,
+ "google.generativeai.types.Model.__ne__": true,
+ "google.generativeai.types.Model.__new__": true,
+ "google.generativeai.types.Model.max_temperature": true,
+ "google.generativeai.types.Model.temperature": true,
+ "google.generativeai.types.Model.top_k": true,
+ "google.generativeai.types.Model.top_p": true,
+ "google.generativeai.types.ModelNameOptions": false,
+ "google.generativeai.types.ModelsIterable": false,
+ "google.generativeai.types.PartDict": false,
+ "google.generativeai.types.PartDict.__contains__": true,
+ "google.generativeai.types.PartDict.__eq__": true,
+ "google.generativeai.types.PartDict.__ge__": true,
+ "google.generativeai.types.PartDict.__getitem__": true,
+ "google.generativeai.types.PartDict.__gt__": true,
+ "google.generativeai.types.PartDict.__init__": true,
+ "google.generativeai.types.PartDict.__iter__": true,
+ "google.generativeai.types.PartDict.__le__": true,
+ "google.generativeai.types.PartDict.__len__": true,
+ "google.generativeai.types.PartDict.__lt__": true,
+ "google.generativeai.types.PartDict.__ne__": true,
+ "google.generativeai.types.PartDict.__new__": true,
+ "google.generativeai.types.PartDict.__or__": true,
+ "google.generativeai.types.PartDict.__ror__": true,
+ "google.generativeai.types.PartDict.clear": true,
+ "google.generativeai.types.PartDict.copy": true,
+ "google.generativeai.types.PartDict.fromkeys": true,
+ "google.generativeai.types.PartDict.get": true,
+ "google.generativeai.types.PartDict.items": true,
+ "google.generativeai.types.PartDict.keys": true,
+ "google.generativeai.types.PartDict.pop": true,
+ "google.generativeai.types.PartDict.popitem": true,
+ "google.generativeai.types.PartDict.setdefault": true,
+ "google.generativeai.types.PartDict.update": true,
+ "google.generativeai.types.PartDict.values": true,
+ "google.generativeai.types.PartType": false,
+ "google.generativeai.types.Permission": false,
+ "google.generativeai.types.Permission.__eq__": true,
+ "google.generativeai.types.Permission.__ge__": true,
+ "google.generativeai.types.Permission.__gt__": true,
+ "google.generativeai.types.Permission.__init__": true,
+ "google.generativeai.types.Permission.__le__": true,
+ "google.generativeai.types.Permission.__lt__": true,
+ "google.generativeai.types.Permission.__ne__": true,
+ "google.generativeai.types.Permission.__new__": true,
+ "google.generativeai.types.Permission.delete": true,
+ "google.generativeai.types.Permission.delete_async": true,
+ "google.generativeai.types.Permission.email_address": true,
+ "google.generativeai.types.Permission.get": true,
+ "google.generativeai.types.Permission.get_async": true,
+ "google.generativeai.types.Permission.to_dict": true,
+ "google.generativeai.types.Permission.update": true,
+ "google.generativeai.types.Permission.update_async": true,
+ "google.generativeai.types.Permissions": false,
+ "google.generativeai.types.Permissions.__eq__": true,
+ "google.generativeai.types.Permissions.__ge__": true,
+ "google.generativeai.types.Permissions.__gt__": true,
+ "google.generativeai.types.Permissions.__init__": true,
+ "google.generativeai.types.Permissions.__iter__": true,
+ "google.generativeai.types.Permissions.__le__": true,
+ "google.generativeai.types.Permissions.__lt__": true,
+ "google.generativeai.types.Permissions.__ne__": true,
+ "google.generativeai.types.Permissions.__new__": true,
+ "google.generativeai.types.Permissions.create": true,
+ "google.generativeai.types.Permissions.create_async": true,
+ "google.generativeai.types.Permissions.get": true,
+ "google.generativeai.types.Permissions.get_async": true,
+ "google.generativeai.types.Permissions.list": true,
+ "google.generativeai.types.Permissions.list_async": true,
+ "google.generativeai.types.Permissions.parent": true,
+ "google.generativeai.types.Permissions.transfer_ownership": true,
+ "google.generativeai.types.Permissions.transfer_ownership_async": true,
+ "google.generativeai.types.RequestOptions": false,
+ "google.generativeai.types.RequestOptions.__contains__": true,
+ "google.generativeai.types.RequestOptions.__eq__": true,
+ "google.generativeai.types.RequestOptions.__ge__": true,
+ "google.generativeai.types.RequestOptions.__getitem__": true,
+ "google.generativeai.types.RequestOptions.__gt__": true,
+ "google.generativeai.types.RequestOptions.__init__": true,
+ "google.generativeai.types.RequestOptions.__iter__": true,
+ "google.generativeai.types.RequestOptions.__le__": true,
+ "google.generativeai.types.RequestOptions.__len__": true,
+ "google.generativeai.types.RequestOptions.__lt__": true,
+ "google.generativeai.types.RequestOptions.__ne__": true,
+ "google.generativeai.types.RequestOptions.__new__": true,
+ "google.generativeai.types.RequestOptions.get": true,
+ "google.generativeai.types.RequestOptions.items": true,
+ "google.generativeai.types.RequestOptions.keys": true,
+ "google.generativeai.types.RequestOptions.values": true,
+ "google.generativeai.types.RequestOptionsType": false,
+ "google.generativeai.types.SafetyFeedbackDict": false,
+ "google.generativeai.types.SafetyFeedbackDict.__contains__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__eq__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ge__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__getitem__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__gt__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__init__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__iter__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__le__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__len__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__lt__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ne__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__new__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__or__": true,
+ "google.generativeai.types.SafetyFeedbackDict.__ror__": true,
+ "google.generativeai.types.SafetyFeedbackDict.clear": true,
+ "google.generativeai.types.SafetyFeedbackDict.copy": true,
+ "google.generativeai.types.SafetyFeedbackDict.fromkeys": true,
+ "google.generativeai.types.SafetyFeedbackDict.get": true,
+ "google.generativeai.types.SafetyFeedbackDict.items": true,
+ "google.generativeai.types.SafetyFeedbackDict.keys": true,
+ "google.generativeai.types.SafetyFeedbackDict.pop": true,
+ "google.generativeai.types.SafetyFeedbackDict.popitem": true,
+ "google.generativeai.types.SafetyFeedbackDict.setdefault": true,
+ "google.generativeai.types.SafetyFeedbackDict.update": true,
+ "google.generativeai.types.SafetyFeedbackDict.values": true,
+ "google.generativeai.types.SafetyRatingDict": false,
+ "google.generativeai.types.SafetyRatingDict.__contains__": true,
+ "google.generativeai.types.SafetyRatingDict.__eq__": true,
+ "google.generativeai.types.SafetyRatingDict.__ge__": true,
+ "google.generativeai.types.SafetyRatingDict.__getitem__": true,
+ "google.generativeai.types.SafetyRatingDict.__gt__": true,
+ "google.generativeai.types.SafetyRatingDict.__init__": true,
+ "google.generativeai.types.SafetyRatingDict.__iter__": true,
+ "google.generativeai.types.SafetyRatingDict.__le__": true,
+ "google.generativeai.types.SafetyRatingDict.__len__": true,
+ "google.generativeai.types.SafetyRatingDict.__lt__": true,
+ "google.generativeai.types.SafetyRatingDict.__ne__": true,
+ "google.generativeai.types.SafetyRatingDict.__new__": true,
+ "google.generativeai.types.SafetyRatingDict.__or__": true,
+ "google.generativeai.types.SafetyRatingDict.__ror__": true,
+ "google.generativeai.types.SafetyRatingDict.clear": true,
+ "google.generativeai.types.SafetyRatingDict.copy": true,
+ "google.generativeai.types.SafetyRatingDict.fromkeys": true,
+ "google.generativeai.types.SafetyRatingDict.get": true,
+ "google.generativeai.types.SafetyRatingDict.items": true,
+ "google.generativeai.types.SafetyRatingDict.keys": true,
+ "google.generativeai.types.SafetyRatingDict.pop": true,
+ "google.generativeai.types.SafetyRatingDict.popitem": true,
+ "google.generativeai.types.SafetyRatingDict.setdefault": true,
+ "google.generativeai.types.SafetyRatingDict.update": true,
+ "google.generativeai.types.SafetyRatingDict.values": true,
+ "google.generativeai.types.SafetySettingDict": false,
+ "google.generativeai.types.SafetySettingDict.__contains__": true,
+ "google.generativeai.types.SafetySettingDict.__eq__": true,
+ "google.generativeai.types.SafetySettingDict.__ge__": true,
+ "google.generativeai.types.SafetySettingDict.__getitem__": true,
+ "google.generativeai.types.SafetySettingDict.__gt__": true,
+ "google.generativeai.types.SafetySettingDict.__init__": true,
+ "google.generativeai.types.SafetySettingDict.__iter__": true,
+ "google.generativeai.types.SafetySettingDict.__le__": true,
+ "google.generativeai.types.SafetySettingDict.__len__": true,
+ "google.generativeai.types.SafetySettingDict.__lt__": true,
+ "google.generativeai.types.SafetySettingDict.__ne__": true,
+ "google.generativeai.types.SafetySettingDict.__new__": true,
+ "google.generativeai.types.SafetySettingDict.__or__": true,
+ "google.generativeai.types.SafetySettingDict.__ror__": true,
+ "google.generativeai.types.SafetySettingDict.clear": true,
+ "google.generativeai.types.SafetySettingDict.copy": true,
+ "google.generativeai.types.SafetySettingDict.fromkeys": true,
+ "google.generativeai.types.SafetySettingDict.get": true,
+ "google.generativeai.types.SafetySettingDict.items": true,
+ "google.generativeai.types.SafetySettingDict.keys": true,
+ "google.generativeai.types.SafetySettingDict.pop": true,
+ "google.generativeai.types.SafetySettingDict.popitem": true,
+ "google.generativeai.types.SafetySettingDict.setdefault": true,
+ "google.generativeai.types.SafetySettingDict.update": true,
+ "google.generativeai.types.SafetySettingDict.values": true,
+ "google.generativeai.types.Status": false,
+ "google.generativeai.types.Status.ByteSize": true,
+ "google.generativeai.types.Status.Clear": true,
+ "google.generativeai.types.Status.ClearExtension": true,
+ "google.generativeai.types.Status.ClearField": true,
+ "google.generativeai.types.Status.CopyFrom": true,
+ "google.generativeai.types.Status.DESCRIPTOR": true,
+ "google.generativeai.types.Status.DiscardUnknownFields": true,
+ "google.generativeai.types.Status.Extensions": true,
+ "google.generativeai.types.Status.FindInitializationErrors": true,
+ "google.generativeai.types.Status.FromString": true,
+ "google.generativeai.types.Status.HasExtension": true,
+ "google.generativeai.types.Status.HasField": true,
+ "google.generativeai.types.Status.IsInitialized": true,
+ "google.generativeai.types.Status.ListFields": true,
+ "google.generativeai.types.Status.MergeFrom": true,
+ "google.generativeai.types.Status.MergeFromString": true,
+ "google.generativeai.types.Status.ParseFromString": true,
+ "google.generativeai.types.Status.RegisterExtension": true,
+ "google.generativeai.types.Status.SerializePartialToString": true,
+ "google.generativeai.types.Status.SerializeToString": true,
+ "google.generativeai.types.Status.SetInParent": true,
+ "google.generativeai.types.Status.UnknownFields": true,
+ "google.generativeai.types.Status.WhichOneof": true,
+ "google.generativeai.types.Status.__eq__": true,
+ "google.generativeai.types.Status.__ge__": true,
+ "google.generativeai.types.Status.__gt__": true,
+ "google.generativeai.types.Status.__init__": true,
+ "google.generativeai.types.Status.__le__": true,
+ "google.generativeai.types.Status.__lt__": true,
+ "google.generativeai.types.Status.__ne__": true,
+ "google.generativeai.types.Status.__new__": true,
+ "google.generativeai.types.Status.code": true,
+ "google.generativeai.types.Status.details": true,
+ "google.generativeai.types.Status.message": true,
+ "google.generativeai.types.StopCandidateException": false,
+ "google.generativeai.types.StopCandidateException.__eq__": true,
+ "google.generativeai.types.StopCandidateException.__ge__": true,
+ "google.generativeai.types.StopCandidateException.__gt__": true,
+ "google.generativeai.types.StopCandidateException.__init__": true,
+ "google.generativeai.types.StopCandidateException.__le__": true,
+ "google.generativeai.types.StopCandidateException.__lt__": true,
+ "google.generativeai.types.StopCandidateException.__ne__": true,
+ "google.generativeai.types.StopCandidateException.__new__": true,
+ "google.generativeai.types.StopCandidateException.add_note": true,
+ "google.generativeai.types.StopCandidateException.args": true,
+ "google.generativeai.types.StopCandidateException.with_traceback": true,
+ "google.generativeai.types.StrictContentType": false,
+ "google.generativeai.types.Tool": false,
+ "google.generativeai.types.Tool.__call__": true,
+ "google.generativeai.types.Tool.__eq__": true,
+ "google.generativeai.types.Tool.__ge__": true,
+ "google.generativeai.types.Tool.__getitem__": true,
+ "google.generativeai.types.Tool.__gt__": true,
+ "google.generativeai.types.Tool.__init__": true,
+ "google.generativeai.types.Tool.__le__": true,
+ "google.generativeai.types.Tool.__lt__": true,
+ "google.generativeai.types.Tool.__ne__": true,
+ "google.generativeai.types.Tool.__new__": true,
+ "google.generativeai.types.Tool.code_execution": true,
+ "google.generativeai.types.Tool.function_declarations": true,
+ "google.generativeai.types.Tool.google_search_retrieval": true,
+ "google.generativeai.types.Tool.to_proto": true,
+ "google.generativeai.types.ToolDict": false,
+ "google.generativeai.types.ToolDict.__contains__": true,
+ "google.generativeai.types.ToolDict.__eq__": true,
+ "google.generativeai.types.ToolDict.__ge__": true,
+ "google.generativeai.types.ToolDict.__getitem__": true,
+ "google.generativeai.types.ToolDict.__gt__": true,
+ "google.generativeai.types.ToolDict.__init__": true,
+ "google.generativeai.types.ToolDict.__iter__": true,
+ "google.generativeai.types.ToolDict.__le__": true,
+ "google.generativeai.types.ToolDict.__len__": true,
+ "google.generativeai.types.ToolDict.__lt__": true,
+ "google.generativeai.types.ToolDict.__ne__": true,
+ "google.generativeai.types.ToolDict.__new__": true,
+ "google.generativeai.types.ToolDict.__or__": true,
+ "google.generativeai.types.ToolDict.__ror__": true,
+ "google.generativeai.types.ToolDict.clear": true,
+ "google.generativeai.types.ToolDict.copy": true,
+ "google.generativeai.types.ToolDict.fromkeys": true,
+ "google.generativeai.types.ToolDict.get": true,
+ "google.generativeai.types.ToolDict.items": true,
+ "google.generativeai.types.ToolDict.keys": true,
+ "google.generativeai.types.ToolDict.pop": true,
+ "google.generativeai.types.ToolDict.popitem": true,
+ "google.generativeai.types.ToolDict.setdefault": true,
+ "google.generativeai.types.ToolDict.update": true,
+ "google.generativeai.types.ToolDict.values": true,
+ "google.generativeai.types.ToolsType": false,
+ "google.generativeai.types.TunedModel": false,
+ "google.generativeai.types.TunedModel.__eq__": true,
+ "google.generativeai.types.TunedModel.__ge__": true,
+ "google.generativeai.types.TunedModel.__gt__": true,
+ "google.generativeai.types.TunedModel.__init__": true,
+ "google.generativeai.types.TunedModel.__le__": true,
+ "google.generativeai.types.TunedModel.__lt__": true,
+ "google.generativeai.types.TunedModel.__ne__": true,
+ "google.generativeai.types.TunedModel.__new__": true,
+ "google.generativeai.types.TunedModel.base_model": true,
+ "google.generativeai.types.TunedModel.create_time": true,
+ "google.generativeai.types.TunedModel.description": true,
+ "google.generativeai.types.TunedModel.display_name": true,
+ "google.generativeai.types.TunedModel.name": true,
+ "google.generativeai.types.TunedModel.permissions": true,
+ "google.generativeai.types.TunedModel.reader_project_numbers": true,
+ "google.generativeai.types.TunedModel.source_model": true,
+ "google.generativeai.types.TunedModel.state": true,
+ "google.generativeai.types.TunedModel.temperature": true,
+ "google.generativeai.types.TunedModel.top_k": true,
+ "google.generativeai.types.TunedModel.top_p": true,
+ "google.generativeai.types.TunedModel.tuning_task": true,
+ "google.generativeai.types.TunedModel.update_time": true,
+ "google.generativeai.types.TunedModelNameOptions": false,
+ "google.generativeai.types.TunedModelState": false,
+ "google.generativeai.types.TunedModelState.ACTIVE": true,
+ "google.generativeai.types.TunedModelState.CREATING": true,
+ "google.generativeai.types.TunedModelState.FAILED": true,
+ "google.generativeai.types.TunedModelState.STATE_UNSPECIFIED": true,
+ "google.generativeai.types.TunedModelState.__abs__": true,
+ "google.generativeai.types.TunedModelState.__add__": true,
+ "google.generativeai.types.TunedModelState.__and__": true,
+ "google.generativeai.types.TunedModelState.__bool__": true,
+ "google.generativeai.types.TunedModelState.__contains__": true,
+ "google.generativeai.types.TunedModelState.__eq__": true,
+ "google.generativeai.types.TunedModelState.__floordiv__": true,
+ "google.generativeai.types.TunedModelState.__ge__": true,
+ "google.generativeai.types.TunedModelState.__getitem__": true,
+ "google.generativeai.types.TunedModelState.__gt__": true,
+ "google.generativeai.types.TunedModelState.__init__": true,
+ "google.generativeai.types.TunedModelState.__invert__": true,
+ "google.generativeai.types.TunedModelState.__iter__": true,
+ "google.generativeai.types.TunedModelState.__le__": true,
+ "google.generativeai.types.TunedModelState.__len__": true,
+ "google.generativeai.types.TunedModelState.__lshift__": true,
+ "google.generativeai.types.TunedModelState.__lt__": true,
+ "google.generativeai.types.TunedModelState.__mod__": true,
+ "google.generativeai.types.TunedModelState.__mul__": true,
+ "google.generativeai.types.TunedModelState.__ne__": true,
+ "google.generativeai.types.TunedModelState.__neg__": true,
+ "google.generativeai.types.TunedModelState.__new__": true,
+ "google.generativeai.types.TunedModelState.__or__": true,
+ "google.generativeai.types.TunedModelState.__pos__": true,
+ "google.generativeai.types.TunedModelState.__pow__": true,
+ "google.generativeai.types.TunedModelState.__radd__": true,
+ "google.generativeai.types.TunedModelState.__rand__": true,
+ "google.generativeai.types.TunedModelState.__rfloordiv__": true,
+ "google.generativeai.types.TunedModelState.__rlshift__": true,
+ "google.generativeai.types.TunedModelState.__rmod__": true,
+ "google.generativeai.types.TunedModelState.__rmul__": true,
+ "google.generativeai.types.TunedModelState.__ror__": true,
+ "google.generativeai.types.TunedModelState.__rpow__": true,
+ "google.generativeai.types.TunedModelState.__rrshift__": true,
+ "google.generativeai.types.TunedModelState.__rshift__": true,
+ "google.generativeai.types.TunedModelState.__rsub__": true,
+ "google.generativeai.types.TunedModelState.__rtruediv__": true,
+ "google.generativeai.types.TunedModelState.__rxor__": true,
+ "google.generativeai.types.TunedModelState.__sub__": true,
+ "google.generativeai.types.TunedModelState.__truediv__": true,
+ "google.generativeai.types.TunedModelState.__xor__": true,
+ "google.generativeai.types.TunedModelState.as_integer_ratio": true,
+ "google.generativeai.types.TunedModelState.bit_count": true,
+ "google.generativeai.types.TunedModelState.bit_length": true,
+ "google.generativeai.types.TunedModelState.conjugate": true,
+ "google.generativeai.types.TunedModelState.denominator": true,
+ "google.generativeai.types.TunedModelState.from_bytes": true,
+ "google.generativeai.types.TunedModelState.imag": true,
+ "google.generativeai.types.TunedModelState.is_integer": true,
+ "google.generativeai.types.TunedModelState.numerator": true,
+ "google.generativeai.types.TunedModelState.real": true,
+ "google.generativeai.types.TunedModelState.to_bytes": true,
+ "google.generativeai.types.TypedDict": false,
+ "google.generativeai.types.annotations": true,
+ "google.generativeai.types.get_default_file_client": false,
+ "google.generativeai.types.to_file_data": false,
+ "google.generativeai.update_tuned_model": false,
+ "google.generativeai.upload_file": false
+ },
+ "link_prefix": null,
+ "physical_path": {
+ "google.generativeai": "google.generativeai",
+ "google.generativeai.ChatSession": "google.generativeai.generative_models.ChatSession",
+ "google.generativeai.ChatSession.__init__": "google.generativeai.generative_models.ChatSession.__init__",
+ "google.generativeai.ChatSession.rewind": "google.generativeai.generative_models.ChatSession.rewind",
+ "google.generativeai.ChatSession.send_message": "google.generativeai.generative_models.ChatSession.send_message",
+ "google.generativeai.ChatSession.send_message_async": "google.generativeai.generative_models.ChatSession.send_message_async",
+ "google.generativeai.GenerativeModel": "google.generativeai.generative_models.GenerativeModel",
+ "google.generativeai.GenerativeModel.__init__": "google.generativeai.generative_models.GenerativeModel.__init__",
+ "google.generativeai.GenerativeModel.count_tokens": "google.generativeai.generative_models.GenerativeModel.count_tokens",
+ "google.generativeai.GenerativeModel.count_tokens_async": "google.generativeai.generative_models.GenerativeModel.count_tokens_async",
+ "google.generativeai.GenerativeModel.from_cached_content": "google.generativeai.generative_models.GenerativeModel.from_cached_content",
+ "google.generativeai.GenerativeModel.generate_content": "google.generativeai.generative_models.GenerativeModel.generate_content",
+ "google.generativeai.GenerativeModel.generate_content_async": "google.generativeai.generative_models.GenerativeModel.generate_content_async",
+ "google.generativeai.GenerativeModel.start_chat": "google.generativeai.generative_models.GenerativeModel.start_chat",
+ "google.generativeai.caching": "google.generativeai.caching",
+ "google.generativeai.caching.CachedContent": "google.generativeai.caching.CachedContent",
+ "google.generativeai.caching.CachedContent.__init__": "google.generativeai.caching.CachedContent.__init__",
+ "google.generativeai.caching.CachedContent.create": "google.generativeai.caching.CachedContent.create",
+ "google.generativeai.caching.CachedContent.delete": "google.generativeai.caching.CachedContent.delete",
+ "google.generativeai.caching.CachedContent.get": "google.generativeai.caching.CachedContent.get",
+ "google.generativeai.caching.CachedContent.list": "google.generativeai.caching.CachedContent.list",
+ "google.generativeai.caching.CachedContent.update": "google.generativeai.caching.CachedContent.update",
+ "google.generativeai.caching.get_default_cache_client": "google.generativeai.client.get_default_cache_client",
+ "google.generativeai.configure": "google.generativeai.client.configure",
+ "google.generativeai.create_tuned_model": "google.generativeai.models.create_tuned_model",
+ "google.generativeai.delete_file": "google.generativeai.files.delete_file",
+ "google.generativeai.delete_tuned_model": "google.generativeai.models.delete_tuned_model",
+ "google.generativeai.embed_content": "google.generativeai.embedding.embed_content",
+ "google.generativeai.embed_content_async": "google.generativeai.embedding.embed_content_async",
+ "google.generativeai.get_base_model": "google.generativeai.models.get_base_model",
+ "google.generativeai.get_file": "google.generativeai.files.get_file",
+ "google.generativeai.get_model": "google.generativeai.models.get_model",
+ "google.generativeai.get_operation": "google.generativeai.operations.get_operation",
+ "google.generativeai.get_tuned_model": "google.generativeai.models.get_tuned_model",
+ "google.generativeai.list_files": "google.generativeai.files.list_files",
+ "google.generativeai.list_models": "google.generativeai.models.list_models",
+ "google.generativeai.list_operations": "google.generativeai.operations.list_operations",
+ "google.generativeai.list_tuned_models": "google.generativeai.models.list_tuned_models",
+ "google.generativeai.protos": "google.generativeai.protos",
+ "google.generativeai.protos.AttributionSourceId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.GroundingPassageId",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__eq__": "proto.message.Message.__eq__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__init__": "proto.message.Message.__init__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.__ne__": "proto.message.Message.__ne__",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.GroundingPassageId.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk": "google.ai.generativelanguage_v1beta.types.generative_service.AttributionSourceId.SemanticRetrieverChunk",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.AttributionSourceId.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.AttributionSourceId.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.AttributionSourceId.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.AttributionSourceId.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.AttributionSourceId.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.AttributionSourceId.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.AttributionSourceId.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.AttributionSourceId.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchCreateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksRequest",
+ "google.generativeai.protos.BatchCreateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchCreateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchCreateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchCreateChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchCreateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchCreateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchCreateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchCreateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchCreateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchCreateChunksResponse",
+ "google.generativeai.protos.BatchCreateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchCreateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchCreateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchCreateChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchCreateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchCreateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchCreateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchCreateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchDeleteChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchDeleteChunksRequest",
+ "google.generativeai.protos.BatchDeleteChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchDeleteChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchDeleteChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchDeleteChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchDeleteChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchDeleteChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchDeleteChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedContentsRequest": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsRequest",
+ "google.generativeai.protos.BatchEmbedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedContentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedContentsResponse": "google.ai.generativelanguage_v1beta.types.generative_service.BatchEmbedContentsResponse",
+ "google.generativeai.protos.BatchEmbedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedContentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextRequest",
+ "google.generativeai.protos.BatchEmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchEmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.BatchEmbedTextResponse",
+ "google.generativeai.protos.BatchEmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchEmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchEmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchEmbedTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchEmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchEmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchEmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchEmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchUpdateChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksRequest",
+ "google.generativeai.protos.BatchUpdateChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchUpdateChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchUpdateChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchUpdateChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchUpdateChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchUpdateChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchUpdateChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.BatchUpdateChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.BatchUpdateChunksResponse",
+ "google.generativeai.protos.BatchUpdateChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.BatchUpdateChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.BatchUpdateChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.BatchUpdateChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.BatchUpdateChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.BatchUpdateChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.BatchUpdateChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Blob": "google.ai.generativelanguage_v1beta.types.content.Blob",
+ "google.generativeai.protos.Blob.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Blob.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Blob.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Blob.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Blob.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Blob.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Blob.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Blob.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CachedContent": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent",
+ "google.generativeai.protos.CachedContent.UsageMetadata": "google.ai.generativelanguage_v1beta.types.cached_content.CachedContent.UsageMetadata",
+ "google.generativeai.protos.CachedContent.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CachedContent.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CachedContent.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CachedContent.UsageMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CachedContent.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CachedContent.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CachedContent.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CachedContent.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CachedContent.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CachedContent.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CachedContent.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CachedContent.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CachedContent.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CachedContent.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CachedContent.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate",
+ "google.generativeai.protos.Candidate.FinishReason": "google.ai.generativelanguage_v1beta.types.generative_service.Candidate.FinishReason",
+ "google.generativeai.protos.Candidate.FinishReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Candidate.FinishReason.__eq__": "proto.enums.Enum.__eq__",
+ "google.generativeai.protos.Candidate.FinishReason.__ge__": "proto.enums.Enum.__ge__",
+ "google.generativeai.protos.Candidate.FinishReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Candidate.FinishReason.__gt__": "proto.enums.Enum.__gt__",
+ "google.generativeai.protos.Candidate.FinishReason.__init__": "enum.Enum.__init__",
+ "google.generativeai.protos.Candidate.FinishReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Candidate.FinishReason.__le__": "proto.enums.Enum.__le__",
+ "google.generativeai.protos.Candidate.FinishReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Candidate.FinishReason.__lt__": "proto.enums.Enum.__lt__",
+ "google.generativeai.protos.Candidate.FinishReason.__ne__": "proto.enums.Enum.__ne__",
+ "google.generativeai.protos.Candidate.FinishReason.__new__": "enum.Enum.__new__",
+ "google.generativeai.protos.Candidate.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Candidate.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Candidate.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Candidate.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Candidate.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Candidate.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Candidate.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Candidate.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Chunk": "google.ai.generativelanguage_v1beta.types.retriever.Chunk",
+ "google.generativeai.protos.Chunk.State": "google.ai.generativelanguage_v1beta.types.retriever.Chunk.State",
+ "google.generativeai.protos.Chunk.State.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Chunk.State.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Chunk.State.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Chunk.State.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Chunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Chunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Chunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Chunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Chunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Chunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Chunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Chunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ChunkData": "google.ai.generativelanguage_v1beta.types.retriever.ChunkData",
+ "google.generativeai.protos.ChunkData.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ChunkData.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ChunkData.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ChunkData.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ChunkData.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ChunkData.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ChunkData.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ChunkData.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CitationMetadata": "google.ai.generativelanguage_v1beta.types.citation.CitationMetadata",
+ "google.generativeai.protos.CitationMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CitationMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CitationMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CitationMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CitationMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CitationMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CitationMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CitationMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CitationSource": "google.ai.generativelanguage_v1beta.types.citation.CitationSource",
+ "google.generativeai.protos.CitationSource.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CitationSource.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CitationSource.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CitationSource.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CitationSource.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CitationSource.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CitationSource.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CitationSource.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CodeExecution": "google.ai.generativelanguage_v1beta.types.content.CodeExecution",
+ "google.generativeai.protos.CodeExecution.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CodeExecution.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CodeExecution.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CodeExecution.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CodeExecution.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CodeExecution.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CodeExecution.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CodeExecution.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CodeExecutionResult": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult",
+ "google.generativeai.protos.CodeExecutionResult.Outcome": "google.ai.generativelanguage_v1beta.types.content.CodeExecutionResult.Outcome",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.CodeExecutionResult.Outcome.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.CodeExecutionResult.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CodeExecutionResult.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CodeExecutionResult.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CodeExecutionResult.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CodeExecutionResult.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CodeExecutionResult.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CodeExecutionResult.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CodeExecutionResult.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Condition": "google.ai.generativelanguage_v1beta.types.retriever.Condition",
+ "google.generativeai.protos.Condition.Operator": "google.ai.generativelanguage_v1beta.types.retriever.Condition.Operator",
+ "google.generativeai.protos.Condition.Operator.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Condition.Operator.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Condition.Operator.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Condition.Operator.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Condition.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Condition.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Condition.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Condition.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Condition.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Condition.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Condition.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Condition.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Content": "google.ai.generativelanguage_v1beta.types.content.Content",
+ "google.generativeai.protos.Content.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Content.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Content.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Content.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Content.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Content.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Content.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Content.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ContentEmbedding": "google.ai.generativelanguage_v1beta.types.generative_service.ContentEmbedding",
+ "google.generativeai.protos.ContentEmbedding.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ContentEmbedding.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ContentEmbedding.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ContentEmbedding.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ContentEmbedding.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ContentEmbedding.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ContentEmbedding.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ContentEmbedding.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ContentFilter": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter",
+ "google.generativeai.protos.ContentFilter.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ContentFilter.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ContentFilter.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ContentFilter.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ContentFilter.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ContentFilter.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ContentFilter.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ContentFilter.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Corpus": "google.ai.generativelanguage_v1beta.types.retriever.Corpus",
+ "google.generativeai.protos.Corpus.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Corpus.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Corpus.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Corpus.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Corpus.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Corpus.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Corpus.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Corpus.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountMessageTokensRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensRequest",
+ "google.generativeai.protos.CountMessageTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountMessageTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountMessageTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountMessageTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountMessageTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountMessageTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountMessageTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountMessageTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountMessageTokensResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.CountMessageTokensResponse",
+ "google.generativeai.protos.CountMessageTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountMessageTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountMessageTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountMessageTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountMessageTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountMessageTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountMessageTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountMessageTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTextTokensRequest": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensRequest",
+ "google.generativeai.protos.CountTextTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTextTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTextTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTextTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTextTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTextTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTextTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTextTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTextTokensResponse": "google.ai.generativelanguage_v1beta.types.text_service.CountTextTokensResponse",
+ "google.generativeai.protos.CountTextTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTextTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTextTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTextTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTextTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTextTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTextTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTextTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTokensRequest": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensRequest",
+ "google.generativeai.protos.CountTokensRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTokensRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTokensRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTokensRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTokensRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTokensRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTokensRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTokensRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CountTokensResponse": "google.ai.generativelanguage_v1beta.types.generative_service.CountTokensResponse",
+ "google.generativeai.protos.CountTokensResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CountTokensResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CountTokensResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CountTokensResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CountTokensResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CountTokensResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CountTokensResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CountTokensResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.CreateCachedContentRequest",
+ "google.generativeai.protos.CreateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateChunkRequest",
+ "google.generativeai.protos.CreateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateCorpusRequest",
+ "google.generativeai.protos.CreateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.CreateDocumentRequest",
+ "google.generativeai.protos.CreateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileRequest",
+ "google.generativeai.protos.CreateFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateFileResponse": "google.ai.generativelanguage_v1beta.types.file_service.CreateFileResponse",
+ "google.generativeai.protos.CreateFileResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateFileResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateFileResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateFileResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateFileResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateFileResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateFileResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateFileResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.CreatePermissionRequest",
+ "google.generativeai.protos.CreatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreatePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateTunedModelMetadata": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelMetadata",
+ "google.generativeai.protos.CreateTunedModelMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateTunedModelMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateTunedModelMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateTunedModelMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateTunedModelMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateTunedModelMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateTunedModelMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateTunedModelMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CreateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.CreateTunedModelRequest",
+ "google.generativeai.protos.CreateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CreateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CreateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CreateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CreateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CreateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CreateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CreateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.CustomMetadata": "google.ai.generativelanguage_v1beta.types.retriever.CustomMetadata",
+ "google.generativeai.protos.CustomMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.CustomMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.CustomMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.CustomMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.CustomMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.CustomMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.CustomMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.CustomMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Dataset": "google.ai.generativelanguage_v1beta.types.tuned_model.Dataset",
+ "google.generativeai.protos.Dataset.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Dataset.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Dataset.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Dataset.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Dataset.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Dataset.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Dataset.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Dataset.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.DeleteCachedContentRequest",
+ "google.generativeai.protos.DeleteCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteChunkRequest",
+ "google.generativeai.protos.DeleteChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteCorpusRequest",
+ "google.generativeai.protos.DeleteCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.DeleteDocumentRequest",
+ "google.generativeai.protos.DeleteDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.DeleteFileRequest",
+ "google.generativeai.protos.DeleteFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeletePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.DeletePermissionRequest",
+ "google.generativeai.protos.DeletePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeletePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeletePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeletePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeletePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeletePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeletePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeletePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DeleteTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.DeleteTunedModelRequest",
+ "google.generativeai.protos.DeleteTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DeleteTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DeleteTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DeleteTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DeleteTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DeleteTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DeleteTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DeleteTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Document": "google.ai.generativelanguage_v1beta.types.retriever.Document",
+ "google.generativeai.protos.Document.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Document.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Document.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Document.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Document.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Document.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Document.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Document.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.DynamicRetrievalConfig": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.DynamicRetrievalConfig.Mode",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.DynamicRetrievalConfig.Mode.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.DynamicRetrievalConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.DynamicRetrievalConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.DynamicRetrievalConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.DynamicRetrievalConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.DynamicRetrievalConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.DynamicRetrievalConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.DynamicRetrievalConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.DynamicRetrievalConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentRequest",
+ "google.generativeai.protos.EmbedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.EmbedContentResponse",
+ "google.generativeai.protos.EmbedContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedContentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedContentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedContentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedContentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedContentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextRequest",
+ "google.generativeai.protos.EmbedTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.EmbedTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.EmbedTextResponse",
+ "google.generativeai.protos.EmbedTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.EmbedTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.EmbedTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.EmbedTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.EmbedTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.EmbedTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.EmbedTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.EmbedTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Embedding": "google.ai.generativelanguage_v1beta.types.text_service.Embedding",
+ "google.generativeai.protos.Embedding.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Embedding.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Embedding.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Embedding.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Embedding.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Embedding.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Embedding.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Embedding.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Example": "google.ai.generativelanguage_v1beta.types.discuss_service.Example",
+ "google.generativeai.protos.Example.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Example.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Example.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Example.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Example.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Example.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Example.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Example.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ExecutableCode": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode",
+ "google.generativeai.protos.ExecutableCode.Language": "google.ai.generativelanguage_v1beta.types.content.ExecutableCode.Language",
+ "google.generativeai.protos.ExecutableCode.Language.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.ExecutableCode.Language.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.ExecutableCode.Language.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.ExecutableCode.Language.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.ExecutableCode.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ExecutableCode.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ExecutableCode.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ExecutableCode.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ExecutableCode.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ExecutableCode.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ExecutableCode.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ExecutableCode.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.File": "google.ai.generativelanguage_v1beta.types.file.File",
+ "google.generativeai.protos.File.State": "google.ai.generativelanguage_v1beta.types.file.File.State",
+ "google.generativeai.protos.File.State.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.File.State.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.File.State.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.File.State.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.File.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.File.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.File.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.File.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.File.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.File.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.File.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.File.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FileData": "google.ai.generativelanguage_v1beta.types.content.FileData",
+ "google.generativeai.protos.FileData.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FileData.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FileData.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FileData.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FileData.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FileData.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FileData.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FileData.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionCall": "google.ai.generativelanguage_v1beta.types.content.FunctionCall",
+ "google.generativeai.protos.FunctionCall.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionCall.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionCall.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionCall.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionCall.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionCall.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionCall.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionCall.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionCallingConfig": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig",
+ "google.generativeai.protos.FunctionCallingConfig.Mode": "google.ai.generativelanguage_v1beta.types.content.FunctionCallingConfig.Mode",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.FunctionCallingConfig.Mode.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.FunctionCallingConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionCallingConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionCallingConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionCallingConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionCallingConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionCallingConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionCallingConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionCallingConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionDeclaration": "google.ai.generativelanguage_v1beta.types.content.FunctionDeclaration",
+ "google.generativeai.protos.FunctionDeclaration.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionDeclaration.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionDeclaration.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionDeclaration.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionDeclaration.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionDeclaration.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionDeclaration.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionDeclaration.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.FunctionResponse": "google.ai.generativelanguage_v1beta.types.content.FunctionResponse",
+ "google.generativeai.protos.FunctionResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.FunctionResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.FunctionResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.FunctionResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.FunctionResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.FunctionResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.FunctionResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.FunctionResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerRequest.AnswerStyle",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateAnswerRequest.AnswerStyle.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateAnswerRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateAnswerResponse.InputFeedback.BlockReason",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerResponse.InputFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateAnswerResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateAnswerResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateAnswerResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateAnswerResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateAnswerResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateAnswerResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateAnswerResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateAnswerResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentRequest": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentRequest",
+ "google.generativeai.protos.GenerateContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.PromptFeedback.BlockReason",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.PromptFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GenerateContentResponse.UsageMetadata",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.UsageMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateContentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateContentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateContentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateContentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateContentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateContentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateContentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateContentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateMessageRequest": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageRequest",
+ "google.generativeai.protos.GenerateMessageRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateMessageRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateMessageRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateMessageRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateMessageRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateMessageRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateMessageRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateMessageRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateMessageResponse": "google.ai.generativelanguage_v1beta.types.discuss_service.GenerateMessageResponse",
+ "google.generativeai.protos.GenerateMessageResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateMessageResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateMessageResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateMessageResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateMessageResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateMessageResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateMessageResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateMessageResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateTextRequest": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextRequest",
+ "google.generativeai.protos.GenerateTextRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateTextRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateTextRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateTextRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateTextRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateTextRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateTextRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateTextRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerateTextResponse": "google.ai.generativelanguage_v1beta.types.text_service.GenerateTextResponse",
+ "google.generativeai.protos.GenerateTextResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerateTextResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerateTextResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerateTextResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerateTextResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerateTextResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerateTextResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerateTextResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GenerationConfig": "google.ai.generativelanguage_v1beta.types.generative_service.GenerationConfig",
+ "google.generativeai.protos.GenerationConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GenerationConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GenerationConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GenerationConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GenerationConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GenerationConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GenerationConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GenerationConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.GetCachedContentRequest",
+ "google.generativeai.protos.GetCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetChunkRequest",
+ "google.generativeai.protos.GetChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetCorpusRequest",
+ "google.generativeai.protos.GetCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.GetDocumentRequest",
+ "google.generativeai.protos.GetDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetFileRequest": "google.ai.generativelanguage_v1beta.types.file_service.GetFileRequest",
+ "google.generativeai.protos.GetFileRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetFileRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetFileRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetFileRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetFileRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetFileRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetFileRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetFileRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetModelRequest",
+ "google.generativeai.protos.GetModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetPermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.GetPermissionRequest",
+ "google.generativeai.protos.GetPermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetPermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetPermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetPermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetPermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetPermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetPermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetPermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GetTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.GetTunedModelRequest",
+ "google.generativeai.protos.GetTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GetTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GetTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GetTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GetTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GetTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GetTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GetTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GoogleSearchRetrieval": "google.ai.generativelanguage_v1beta.types.content.GoogleSearchRetrieval",
+ "google.generativeai.protos.GoogleSearchRetrieval.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GoogleSearchRetrieval.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GoogleSearchRetrieval.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GoogleSearchRetrieval.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GoogleSearchRetrieval.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GoogleSearchRetrieval.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GoogleSearchRetrieval.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GoogleSearchRetrieval.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingAttribution": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingAttribution",
+ "google.generativeai.protos.GroundingAttribution.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingAttribution.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingAttribution.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingAttribution.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingAttribution.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingAttribution.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingAttribution.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingAttribution.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingChunk": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk",
+ "google.generativeai.protos.GroundingChunk.Web": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingChunk.Web",
+ "google.generativeai.protos.GroundingChunk.Web.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingChunk.Web.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingChunk.Web.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingChunk.Web.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingChunk.Web.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingChunk.Web.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingChunk.Web.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingChunk.Web.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingChunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingChunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingChunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingChunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingChunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingChunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingChunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingChunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingMetadata",
+ "google.generativeai.protos.GroundingMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingPassage": "google.ai.generativelanguage_v1beta.types.content.GroundingPassage",
+ "google.generativeai.protos.GroundingPassage.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingPassage.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingPassage.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingPassage.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingPassage.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingPassage.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingPassage.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingPassage.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingPassages": "google.ai.generativelanguage_v1beta.types.content.GroundingPassages",
+ "google.generativeai.protos.GroundingPassages.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingPassages.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingPassages.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingPassages.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingPassages.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingPassages.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingPassages.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingPassages.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.GroundingSupport": "google.ai.generativelanguage_v1beta.types.generative_service.GroundingSupport",
+ "google.generativeai.protos.GroundingSupport.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.GroundingSupport.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.GroundingSupport.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.GroundingSupport.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.GroundingSupport.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.GroundingSupport.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.GroundingSupport.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.GroundingSupport.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.HarmCategory": "google.ai.generativelanguage_v1beta.types.safety.HarmCategory",
+ "google.generativeai.protos.HarmCategory.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.HarmCategory.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.HarmCategory.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Hyperparameters": "google.ai.generativelanguage_v1beta.types.tuned_model.Hyperparameters",
+ "google.generativeai.protos.Hyperparameters.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Hyperparameters.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Hyperparameters.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Hyperparameters.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Hyperparameters.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Hyperparameters.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Hyperparameters.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Hyperparameters.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCachedContentsRequest": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsRequest",
+ "google.generativeai.protos.ListCachedContentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCachedContentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCachedContentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCachedContentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCachedContentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCachedContentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCachedContentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCachedContentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCachedContentsResponse": "google.ai.generativelanguage_v1beta.types.cache_service.ListCachedContentsResponse",
+ "google.generativeai.protos.ListCachedContentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCachedContentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCachedContentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCachedContentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCachedContentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCachedContentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCachedContentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCachedContentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListChunksRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksRequest",
+ "google.generativeai.protos.ListChunksRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListChunksRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListChunksRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListChunksRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListChunksRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListChunksRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListChunksRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListChunksRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListChunksResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListChunksResponse",
+ "google.generativeai.protos.ListChunksResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListChunksResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListChunksResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListChunksResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListChunksResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListChunksResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListChunksResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListChunksResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCorporaRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaRequest",
+ "google.generativeai.protos.ListCorporaRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCorporaRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCorporaRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCorporaRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCorporaRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCorporaRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCorporaRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCorporaRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListCorporaResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListCorporaResponse",
+ "google.generativeai.protos.ListCorporaResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListCorporaResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListCorporaResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListCorporaResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListCorporaResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListCorporaResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListCorporaResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListCorporaResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListDocumentsRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsRequest",
+ "google.generativeai.protos.ListDocumentsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListDocumentsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListDocumentsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListDocumentsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListDocumentsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListDocumentsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListDocumentsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListDocumentsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListDocumentsResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.ListDocumentsResponse",
+ "google.generativeai.protos.ListDocumentsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListDocumentsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListDocumentsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListDocumentsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListDocumentsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListDocumentsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListDocumentsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListDocumentsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListFilesRequest": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesRequest",
+ "google.generativeai.protos.ListFilesRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListFilesRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListFilesRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListFilesRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListFilesRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListFilesRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListFilesRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListFilesRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListFilesResponse": "google.ai.generativelanguage_v1beta.types.file_service.ListFilesResponse",
+ "google.generativeai.protos.ListFilesResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListFilesResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListFilesResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListFilesResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListFilesResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListFilesResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListFilesResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListFilesResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsRequest",
+ "google.generativeai.protos.ListModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListModelsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListModelsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListModelsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListModelsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListModelsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListModelsResponse",
+ "google.generativeai.protos.ListModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListModelsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListModelsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListModelsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListModelsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListModelsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListPermissionsRequest": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsRequest",
+ "google.generativeai.protos.ListPermissionsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListPermissionsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListPermissionsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListPermissionsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListPermissionsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListPermissionsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListPermissionsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListPermissionsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListPermissionsResponse": "google.ai.generativelanguage_v1beta.types.permission_service.ListPermissionsResponse",
+ "google.generativeai.protos.ListPermissionsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListPermissionsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListPermissionsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListPermissionsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListPermissionsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListPermissionsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListPermissionsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListPermissionsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListTunedModelsRequest": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsRequest",
+ "google.generativeai.protos.ListTunedModelsRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListTunedModelsRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListTunedModelsRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListTunedModelsRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListTunedModelsRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListTunedModelsRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListTunedModelsRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListTunedModelsRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ListTunedModelsResponse": "google.ai.generativelanguage_v1beta.types.model_service.ListTunedModelsResponse",
+ "google.generativeai.protos.ListTunedModelsResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ListTunedModelsResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ListTunedModelsResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ListTunedModelsResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ListTunedModelsResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ListTunedModelsResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ListTunedModelsResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ListTunedModelsResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.LogprobsResult": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult",
+ "google.generativeai.protos.LogprobsResult.Candidate": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.Candidate",
+ "google.generativeai.protos.LogprobsResult.Candidate.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.LogprobsResult.Candidate.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.LogprobsResult.Candidate.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.LogprobsResult.Candidate.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.LogprobsResult.Candidate.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.LogprobsResult.Candidate.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.LogprobsResult.Candidate.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.LogprobsResult.Candidate.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.LogprobsResult.TopCandidates": "google.ai.generativelanguage_v1beta.types.generative_service.LogprobsResult.TopCandidates",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.LogprobsResult.TopCandidates.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.LogprobsResult.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.LogprobsResult.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.LogprobsResult.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.LogprobsResult.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.LogprobsResult.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.LogprobsResult.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.LogprobsResult.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.LogprobsResult.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Message": "google.ai.generativelanguage_v1beta.types.discuss_service.Message",
+ "google.generativeai.protos.Message.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Message.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Message.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Message.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Message.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Message.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Message.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Message.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.MessagePrompt": "google.ai.generativelanguage_v1beta.types.discuss_service.MessagePrompt",
+ "google.generativeai.protos.MessagePrompt.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.MessagePrompt.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.MessagePrompt.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.MessagePrompt.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.MessagePrompt.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.MessagePrompt.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.MessagePrompt.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.MessagePrompt.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.MetadataFilter": "google.ai.generativelanguage_v1beta.types.retriever.MetadataFilter",
+ "google.generativeai.protos.MetadataFilter.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.MetadataFilter.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.MetadataFilter.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.MetadataFilter.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.MetadataFilter.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.MetadataFilter.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.MetadataFilter.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.MetadataFilter.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Model": "google.ai.generativelanguage_v1beta.types.model.Model",
+ "google.generativeai.protos.Model.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Model.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Model.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Model.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Model.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Model.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Model.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Model.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Part": "google.ai.generativelanguage_v1beta.types.content.Part",
+ "google.generativeai.protos.Part.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Part.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Part.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Part.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Part.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Part.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Part.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Part.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Permission": "google.ai.generativelanguage_v1beta.types.permission.Permission",
+ "google.generativeai.protos.Permission.GranteeType": "google.ai.generativelanguage_v1beta.types.permission.Permission.GranteeType",
+ "google.generativeai.protos.Permission.GranteeType.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Permission.GranteeType.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Permission.GranteeType.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Permission.GranteeType.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Permission.Role": "google.ai.generativelanguage_v1beta.types.permission.Permission.Role",
+ "google.generativeai.protos.Permission.Role.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Permission.Role.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Permission.Role.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Permission.Role.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.Permission.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Permission.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Permission.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Permission.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Permission.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Permission.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Permission.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Permission.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.PredictRequest": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictRequest",
+ "google.generativeai.protos.PredictRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.PredictRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.PredictRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.PredictRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.PredictRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.PredictRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.PredictRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.PredictRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.PredictResponse": "google.ai.generativelanguage_v1beta.types.prediction_service.PredictResponse",
+ "google.generativeai.protos.PredictResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.PredictResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.PredictResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.PredictResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.PredictResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.PredictResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.PredictResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.PredictResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusRequest",
+ "google.generativeai.protos.QueryCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryCorpusResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryCorpusResponse",
+ "google.generativeai.protos.QueryCorpusResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryCorpusResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryCorpusResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryCorpusResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryCorpusResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryCorpusResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryCorpusResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryCorpusResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentRequest",
+ "google.generativeai.protos.QueryDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.QueryDocumentResponse": "google.ai.generativelanguage_v1beta.types.retriever_service.QueryDocumentResponse",
+ "google.generativeai.protos.QueryDocumentResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.QueryDocumentResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.QueryDocumentResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.QueryDocumentResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.QueryDocumentResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.QueryDocumentResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.QueryDocumentResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.QueryDocumentResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.RelevantChunk": "google.ai.generativelanguage_v1beta.types.retriever_service.RelevantChunk",
+ "google.generativeai.protos.RelevantChunk.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.RelevantChunk.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.RelevantChunk.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.RelevantChunk.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.RelevantChunk.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.RelevantChunk.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.RelevantChunk.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.RelevantChunk.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.RetrievalMetadata": "google.ai.generativelanguage_v1beta.types.generative_service.RetrievalMetadata",
+ "google.generativeai.protos.RetrievalMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.RetrievalMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.RetrievalMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.RetrievalMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.RetrievalMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.RetrievalMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.RetrievalMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.RetrievalMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetyFeedback": "google.ai.generativelanguage_v1beta.types.safety.SafetyFeedback",
+ "google.generativeai.protos.SafetyFeedback.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetyFeedback.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetyFeedback.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetyFeedback.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetyFeedback.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetyFeedback.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetyFeedback.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetyFeedback.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetyRating": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating",
+ "google.generativeai.protos.SafetyRating.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetyRating.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetyRating.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetyRating.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetyRating.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetyRating.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetyRating.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetyRating.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SafetySetting": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting",
+ "google.generativeai.protos.SafetySetting.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SafetySetting.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SafetySetting.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SafetySetting.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SafetySetting.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SafetySetting.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SafetySetting.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SafetySetting.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Schema": "google.ai.generativelanguage_v1beta.types.content.Schema",
+ "google.generativeai.protos.Schema.PropertiesEntry": "google.ai.generativelanguage_v1beta.types.content.Schema.PropertiesEntry",
+ "google.generativeai.protos.Schema.PropertiesEntry.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Schema.PropertiesEntry.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Schema.PropertiesEntry.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Schema.PropertiesEntry.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Schema.PropertiesEntry.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Schema.PropertiesEntry.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Schema.PropertiesEntry.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Schema.PropertiesEntry.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Schema.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Schema.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Schema.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Schema.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Schema.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Schema.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Schema.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Schema.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SearchEntryPoint": "google.ai.generativelanguage_v1beta.types.generative_service.SearchEntryPoint",
+ "google.generativeai.protos.SearchEntryPoint.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SearchEntryPoint.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SearchEntryPoint.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SearchEntryPoint.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SearchEntryPoint.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SearchEntryPoint.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SearchEntryPoint.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SearchEntryPoint.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Segment": "google.ai.generativelanguage_v1beta.types.generative_service.Segment",
+ "google.generativeai.protos.Segment.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Segment.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Segment.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Segment.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Segment.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Segment.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Segment.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Segment.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.SemanticRetrieverConfig": "google.ai.generativelanguage_v1beta.types.generative_service.SemanticRetrieverConfig",
+ "google.generativeai.protos.SemanticRetrieverConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.SemanticRetrieverConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.SemanticRetrieverConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.SemanticRetrieverConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.SemanticRetrieverConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.SemanticRetrieverConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.SemanticRetrieverConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.SemanticRetrieverConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.StringList": "google.ai.generativelanguage_v1beta.types.retriever.StringList",
+ "google.generativeai.protos.StringList.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.StringList.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.StringList.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.StringList.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.StringList.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.StringList.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.StringList.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.StringList.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TaskType": "google.ai.generativelanguage_v1beta.types.generative_service.TaskType",
+ "google.generativeai.protos.TaskType.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.TaskType.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.TaskType.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.TaskType.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.TextCompletion": "google.ai.generativelanguage_v1beta.types.text_service.TextCompletion",
+ "google.generativeai.protos.TextCompletion.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TextCompletion.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TextCompletion.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TextCompletion.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TextCompletion.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TextCompletion.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TextCompletion.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TextCompletion.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TextPrompt": "google.ai.generativelanguage_v1beta.types.text_service.TextPrompt",
+ "google.generativeai.protos.TextPrompt.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TextPrompt.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TextPrompt.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TextPrompt.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TextPrompt.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TextPrompt.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TextPrompt.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TextPrompt.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Tool": "google.ai.generativelanguage_v1beta.types.content.Tool",
+ "google.generativeai.protos.Tool.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.Tool.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.Tool.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.Tool.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.Tool.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.Tool.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.Tool.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.Tool.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.ToolConfig": "google.ai.generativelanguage_v1beta.types.content.ToolConfig",
+ "google.generativeai.protos.ToolConfig.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.ToolConfig.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.ToolConfig.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.ToolConfig.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.ToolConfig.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.ToolConfig.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.ToolConfig.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.ToolConfig.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TransferOwnershipRequest": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipRequest",
+ "google.generativeai.protos.TransferOwnershipRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TransferOwnershipRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TransferOwnershipRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TransferOwnershipRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TransferOwnershipRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TransferOwnershipRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TransferOwnershipRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TransferOwnershipRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TransferOwnershipResponse": "google.ai.generativelanguage_v1beta.types.permission_service.TransferOwnershipResponse",
+ "google.generativeai.protos.TransferOwnershipResponse.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TransferOwnershipResponse.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TransferOwnershipResponse.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TransferOwnershipResponse.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TransferOwnershipResponse.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TransferOwnershipResponse.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TransferOwnershipResponse.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TransferOwnershipResponse.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TunedModel": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel",
+ "google.generativeai.protos.TunedModel.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TunedModel.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TunedModel.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TunedModel.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TunedModel.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TunedModel.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TunedModel.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TunedModel.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TunedModelSource": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModelSource",
+ "google.generativeai.protos.TunedModelSource.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TunedModelSource.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TunedModelSource.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TunedModelSource.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TunedModelSource.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TunedModelSource.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TunedModelSource.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TunedModelSource.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningExample": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExample",
+ "google.generativeai.protos.TuningExample.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningExample.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningExample.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningExample.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningExample.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningExample.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningExample.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningExample.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningExamples": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningExamples",
+ "google.generativeai.protos.TuningExamples.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningExamples.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningExamples.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningExamples.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningExamples.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningExamples.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningExamples.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningExamples.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningSnapshot": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningSnapshot",
+ "google.generativeai.protos.TuningSnapshot.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningSnapshot.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningSnapshot.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningSnapshot.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningSnapshot.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningSnapshot.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningSnapshot.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningSnapshot.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.TuningTask": "google.ai.generativelanguage_v1beta.types.tuned_model.TuningTask",
+ "google.generativeai.protos.TuningTask.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.TuningTask.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.TuningTask.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.TuningTask.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.TuningTask.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.TuningTask.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.TuningTask.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.TuningTask.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.Type": "google.ai.generativelanguage_v1beta.types.content.Type",
+ "google.generativeai.protos.Type.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.protos.Type.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.protos.Type.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.protos.Type.__len__": "enum.EnumType.__len__",
+ "google.generativeai.protos.UpdateCachedContentRequest": "google.ai.generativelanguage_v1beta.types.cache_service.UpdateCachedContentRequest",
+ "google.generativeai.protos.UpdateCachedContentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateCachedContentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateCachedContentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateCachedContentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateCachedContentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateCachedContentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateCachedContentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateCachedContentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateChunkRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateChunkRequest",
+ "google.generativeai.protos.UpdateChunkRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateChunkRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateChunkRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateChunkRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateChunkRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateChunkRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateChunkRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateChunkRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateCorpusRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateCorpusRequest",
+ "google.generativeai.protos.UpdateCorpusRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateCorpusRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateCorpusRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateCorpusRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateCorpusRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateCorpusRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateCorpusRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateCorpusRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateDocumentRequest": "google.ai.generativelanguage_v1beta.types.retriever_service.UpdateDocumentRequest",
+ "google.generativeai.protos.UpdateDocumentRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateDocumentRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateDocumentRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateDocumentRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateDocumentRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateDocumentRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateDocumentRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateDocumentRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdatePermissionRequest": "google.ai.generativelanguage_v1beta.types.permission_service.UpdatePermissionRequest",
+ "google.generativeai.protos.UpdatePermissionRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdatePermissionRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdatePermissionRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdatePermissionRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdatePermissionRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdatePermissionRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdatePermissionRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdatePermissionRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.UpdateTunedModelRequest": "google.ai.generativelanguage_v1beta.types.model_service.UpdateTunedModelRequest",
+ "google.generativeai.protos.UpdateTunedModelRequest.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.UpdateTunedModelRequest.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.UpdateTunedModelRequest.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.UpdateTunedModelRequest.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.UpdateTunedModelRequest.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.UpdateTunedModelRequest.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.UpdateTunedModelRequest.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.UpdateTunedModelRequest.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.protos.VideoMetadata": "google.ai.generativelanguage_v1beta.types.file.VideoMetadata",
+ "google.generativeai.protos.VideoMetadata.copy_from": "proto.message.MessageMeta.copy_from",
+ "google.generativeai.protos.VideoMetadata.deserialize": "proto.message.MessageMeta.deserialize",
+ "google.generativeai.protos.VideoMetadata.from_json": "proto.message.MessageMeta.from_json",
+ "google.generativeai.protos.VideoMetadata.pb": "proto.message.MessageMeta.pb",
+ "google.generativeai.protos.VideoMetadata.serialize": "proto.message.MessageMeta.serialize",
+ "google.generativeai.protos.VideoMetadata.to_dict": "proto.message.MessageMeta.to_dict",
+ "google.generativeai.protos.VideoMetadata.to_json": "proto.message.MessageMeta.to_json",
+ "google.generativeai.protos.VideoMetadata.wrap": "proto.message.MessageMeta.wrap",
+ "google.generativeai.types": "google.generativeai.types",
+ "google.generativeai.types.AsyncGenerateContentResponse": "google.generativeai.types.generation_types.AsyncGenerateContentResponse",
+ "google.generativeai.types.AsyncGenerateContentResponse.__init__": "google.generativeai.types.generation_types.BaseGenerateContentResponse.__init__",
+ "google.generativeai.types.AsyncGenerateContentResponse.from_aiterator": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_aiterator",
+ "google.generativeai.types.AsyncGenerateContentResponse.from_response": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.from_response",
+ "google.generativeai.types.AsyncGenerateContentResponse.resolve": "google.generativeai.types.generation_types.AsyncGenerateContentResponse.resolve",
+ "google.generativeai.types.AsyncGenerateContentResponse.to_dict": "google.generativeai.types.generation_types.BaseGenerateContentResponse.to_dict",
+ "google.generativeai.types.BlobDict": "google.generativeai.types.content_types.BlobDict",
+ "google.generativeai.types.BlockedPromptException": "google.generativeai.types.generation_types.BlockedPromptException",
+ "google.generativeai.types.BlockedReason": "google.ai.generativelanguage_v1beta.types.safety.ContentFilter.BlockedReason",
+ "google.generativeai.types.BlockedReason.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.BlockedReason.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.BlockedReason.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.BlockedReason.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.BrokenResponseError": "google.generativeai.types.generation_types.BrokenResponseError",
+ "google.generativeai.types.CallableFunctionDeclaration": "google.generativeai.types.content_types.CallableFunctionDeclaration",
+ "google.generativeai.types.CallableFunctionDeclaration.__call__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__call__",
+ "google.generativeai.types.CallableFunctionDeclaration.__init__": "google.generativeai.types.content_types.CallableFunctionDeclaration.__init__",
+ "google.generativeai.types.CallableFunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
+ "google.generativeai.types.CitationMetadataDict": "google.generativeai.types.citation_types.CitationMetadataDict",
+ "google.generativeai.types.CitationSourceDict": "google.generativeai.types.citation_types.CitationSourceDict",
+ "google.generativeai.types.ContentDict": "google.generativeai.types.content_types.ContentDict",
+ "google.generativeai.types.ContentFilterDict": "google.generativeai.types.safety_types.ContentFilterDict",
+ "google.generativeai.types.File": "google.generativeai.types.file_types.File",
+ "google.generativeai.types.File.__init__": "google.generativeai.types.file_types.File.__init__",
+ "google.generativeai.types.File.delete": "google.generativeai.types.file_types.File.delete",
+ "google.generativeai.types.File.to_dict": "google.generativeai.types.file_types.File.to_dict",
+ "google.generativeai.types.File.to_proto": "google.generativeai.types.file_types.File.to_proto",
+ "google.generativeai.types.FileDataDict": "google.generativeai.types.file_types.FileDataDict",
+ "google.generativeai.types.FunctionDeclaration": "google.generativeai.types.content_types.FunctionDeclaration",
+ "google.generativeai.types.FunctionDeclaration.__init__": "google.generativeai.types.content_types.FunctionDeclaration.__init__",
+ "google.generativeai.types.FunctionDeclaration.from_function": "google.generativeai.types.content_types.FunctionDeclaration.from_function",
+ "google.generativeai.types.FunctionDeclaration.from_proto": "google.generativeai.types.content_types.FunctionDeclaration.from_proto",
+ "google.generativeai.types.FunctionDeclaration.to_proto": "google.generativeai.types.content_types.FunctionDeclaration.to_proto",
+ "google.generativeai.types.FunctionLibrary": "google.generativeai.types.content_types.FunctionLibrary",
+ "google.generativeai.types.FunctionLibrary.__call__": "google.generativeai.types.content_types.FunctionLibrary.__call__",
+ "google.generativeai.types.FunctionLibrary.__getitem__": "google.generativeai.types.content_types.FunctionLibrary.__getitem__",
+ "google.generativeai.types.FunctionLibrary.__init__": "google.generativeai.types.content_types.FunctionLibrary.__init__",
+ "google.generativeai.types.FunctionLibrary.to_proto": "google.generativeai.types.content_types.FunctionLibrary.to_proto",
+ "google.generativeai.types.GenerateContentResponse": "google.generativeai.types.generation_types.GenerateContentResponse",
+ "google.generativeai.types.GenerateContentResponse.__iter__": "google.generativeai.types.generation_types.GenerateContentResponse.__iter__",
+ "google.generativeai.types.GenerateContentResponse.from_iterator": "google.generativeai.types.generation_types.GenerateContentResponse.from_iterator",
+ "google.generativeai.types.GenerateContentResponse.from_response": "google.generativeai.types.generation_types.GenerateContentResponse.from_response",
+ "google.generativeai.types.GenerateContentResponse.resolve": "google.generativeai.types.generation_types.GenerateContentResponse.resolve",
+ "google.generativeai.types.GenerationConfig": "google.generativeai.types.generation_types.GenerationConfig",
+ "google.generativeai.types.GenerationConfig.__eq__": "google.generativeai.types.generation_types.GenerationConfig.__eq__",
+ "google.generativeai.types.GenerationConfig.__init__": "google.generativeai.types.generation_types.GenerationConfig.__init__",
+ "google.generativeai.types.GenerationConfigDict": "google.generativeai.types.generation_types.GenerationConfigDict",
+ "google.generativeai.types.HarmBlockThreshold": "google.ai.generativelanguage_v1beta.types.safety.SafetySetting.HarmBlockThreshold",
+ "google.generativeai.types.HarmBlockThreshold.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmBlockThreshold.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmBlockThreshold.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmBlockThreshold.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.HarmCategory": "google.generativeai.types.safety_types.HarmCategory",
+ "google.generativeai.types.HarmCategory.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmCategory.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmCategory.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmCategory.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.HarmProbability": "google.ai.generativelanguage_v1beta.types.safety.SafetyRating.HarmProbability",
+ "google.generativeai.types.HarmProbability.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.HarmProbability.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.HarmProbability.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.HarmProbability.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.IncompleteIterationError": "google.generativeai.types.generation_types.IncompleteIterationError",
+ "google.generativeai.types.Model": "google.generativeai.types.model_types.Model",
+ "google.generativeai.types.Model.__eq__": "google.generativeai.types.model_types.Model.__eq__",
+ "google.generativeai.types.Model.__init__": "google.generativeai.types.model_types.Model.__init__",
+ "google.generativeai.types.PartDict": "google.generativeai.types.content_types.PartDict",
+ "google.generativeai.types.Permission": "google.generativeai.types.permission_types.Permission",
+ "google.generativeai.types.Permission.__eq__": "google.generativeai.types.permission_types.Permission.__eq__",
+ "google.generativeai.types.Permission.__init__": "google.generativeai.types.permission_types.Permission.__init__",
+ "google.generativeai.types.Permission.delete": "google.generativeai.types.permission_types.Permission.delete",
+ "google.generativeai.types.Permission.delete_async": "google.generativeai.types.permission_types.Permission.delete_async",
+ "google.generativeai.types.Permission.get": "google.generativeai.types.permission_types.Permission.get",
+ "google.generativeai.types.Permission.get_async": "google.generativeai.types.permission_types.Permission.get_async",
+ "google.generativeai.types.Permission.to_dict": "google.generativeai.types.permission_types.Permission.to_dict",
+ "google.generativeai.types.Permission.update": "google.generativeai.types.permission_types.Permission.update",
+ "google.generativeai.types.Permission.update_async": "google.generativeai.types.permission_types.Permission.update_async",
+ "google.generativeai.types.Permissions": "google.generativeai.types.permission_types.Permissions",
+ "google.generativeai.types.Permissions.__init__": "google.generativeai.types.permission_types.Permissions.__init__",
+ "google.generativeai.types.Permissions.__iter__": "google.generativeai.types.permission_types.Permissions.__iter__",
+ "google.generativeai.types.Permissions.create": "google.generativeai.types.permission_types.Permissions.create",
+ "google.generativeai.types.Permissions.create_async": "google.generativeai.types.permission_types.Permissions.create_async",
+ "google.generativeai.types.Permissions.get": "google.generativeai.types.permission_types.Permissions.get",
+ "google.generativeai.types.Permissions.get_async": "google.generativeai.types.permission_types.Permissions.get_async",
+ "google.generativeai.types.Permissions.list": "google.generativeai.types.permission_types.Permissions.list",
+ "google.generativeai.types.Permissions.list_async": "google.generativeai.types.permission_types.Permissions.list_async",
+ "google.generativeai.types.Permissions.transfer_ownership": "google.generativeai.types.permission_types.Permissions.transfer_ownership",
+ "google.generativeai.types.Permissions.transfer_ownership_async": "google.generativeai.types.permission_types.Permissions.transfer_ownership_async",
+ "google.generativeai.types.RequestOptions": "google.generativeai.types.helper_types.RequestOptions",
+ "google.generativeai.types.RequestOptions.__contains__": "collections.abc.Mapping.__contains__",
+ "google.generativeai.types.RequestOptions.__eq__": "google.generativeai.types.helper_types.RequestOptions.__eq__",
+ "google.generativeai.types.RequestOptions.__getitem__": "google.generativeai.types.helper_types.RequestOptions.__getitem__",
+ "google.generativeai.types.RequestOptions.__init__": "google.generativeai.types.helper_types.RequestOptions.__init__",
+ "google.generativeai.types.RequestOptions.__iter__": "google.generativeai.types.helper_types.RequestOptions.__iter__",
+ "google.generativeai.types.RequestOptions.__len__": "google.generativeai.types.helper_types.RequestOptions.__len__",
+ "google.generativeai.types.RequestOptions.get": "collections.abc.Mapping.get",
+ "google.generativeai.types.RequestOptions.items": "collections.abc.Mapping.items",
+ "google.generativeai.types.RequestOptions.keys": "collections.abc.Mapping.keys",
+ "google.generativeai.types.RequestOptions.values": "collections.abc.Mapping.values",
+ "google.generativeai.types.SafetyFeedbackDict": "google.generativeai.types.safety_types.SafetyFeedbackDict",
+ "google.generativeai.types.SafetyRatingDict": "google.generativeai.types.safety_types.SafetyRatingDict",
+ "google.generativeai.types.SafetySettingDict": "google.generativeai.types.safety_types.SafetySettingDict",
+ "google.generativeai.types.Status": "google.rpc.status_pb2.Status",
+ "google.generativeai.types.Status.RegisterExtension": "google.protobuf.message.Message.RegisterExtension",
+ "google.generativeai.types.StopCandidateException": "google.generativeai.types.generation_types.StopCandidateException",
+ "google.generativeai.types.Tool": "google.generativeai.types.content_types.Tool",
+ "google.generativeai.types.Tool.__call__": "google.generativeai.types.content_types.Tool.__call__",
+ "google.generativeai.types.Tool.__getitem__": "google.generativeai.types.content_types.Tool.__getitem__",
+ "google.generativeai.types.Tool.__init__": "google.generativeai.types.content_types.Tool.__init__",
+ "google.generativeai.types.Tool.to_proto": "google.generativeai.types.content_types.Tool.to_proto",
+ "google.generativeai.types.ToolDict": "google.generativeai.types.content_types.ToolDict",
+ "google.generativeai.types.TunedModel": "google.generativeai.types.model_types.TunedModel",
+ "google.generativeai.types.TunedModel.__eq__": "google.generativeai.types.model_types.TunedModel.__eq__",
+ "google.generativeai.types.TunedModel.__init__": "google.generativeai.types.model_types.TunedModel.__init__",
+ "google.generativeai.types.TunedModelState": "google.ai.generativelanguage_v1beta.types.tuned_model.TunedModel.State",
+ "google.generativeai.types.TunedModelState.__contains__": "enum.EnumType.__contains__",
+ "google.generativeai.types.TunedModelState.__getitem__": "enum.EnumType.__getitem__",
+ "google.generativeai.types.TunedModelState.__iter__": "enum.EnumType.__iter__",
+ "google.generativeai.types.TunedModelState.__len__": "enum.EnumType.__len__",
+ "google.generativeai.types.TypedDict": "typing_extensions.TypedDict",
+ "google.generativeai.types.get_default_file_client": "google.generativeai.client.get_default_file_client",
+ "google.generativeai.types.to_file_data": "google.generativeai.types.file_types.to_file_data",
+ "google.generativeai.update_tuned_model": "google.generativeai.models.update_tuned_model",
+ "google.generativeai.upload_file": "google.generativeai.files.upload_file"
+ },
+ "py_module_names": {
+ "google.generativeai": "google.generativeai"
+ }
+}
diff --git a/docs/api/google/generativeai/_redirects.yaml b/docs/api/google/generativeai/_redirects.yaml
new file mode 100644
index 000000000..cea696430
--- /dev/null
+++ b/docs/api/google/generativeai/_redirects.yaml
@@ -0,0 +1,13 @@
+redirects:
+- from: /api/python/google/generativeai/GenerationConfig
+ to: /api/python/google/generativeai/types/GenerationConfig
+- from: /api/python/google/generativeai/protos/ContentFilter/BlockedReason
+ to: /api/python/google/generativeai/types/BlockedReason
+- from: /api/python/google/generativeai/protos/SafetyRating/HarmProbability
+ to: /api/python/google/generativeai/types/HarmProbability
+- from: /api/python/google/generativeai/protos/SafetySetting/HarmBlockThreshold
+ to: /api/python/google/generativeai/types/HarmBlockThreshold
+- from: /api/python/google/generativeai/protos/TunedModel/State
+ to: /api/python/google/generativeai/types/TunedModelState
+- from: /api/python/google/generativeai/types/ModelNameOptions
+ to: /api/python/google/generativeai/types/AnyModelNameOptions
diff --git a/docs/api/google/generativeai/_toc.yaml b/docs/api/google/generativeai/_toc.yaml
new file mode 100644
index 000000000..7d18dbf66
--- /dev/null
+++ b/docs/api/google/generativeai/_toc.yaml
@@ -0,0 +1,509 @@
+toc:
+- title: google.generativeai
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai
+ - title: ChatSession
+ path: /api/python/google/generativeai/ChatSession
+ - title: GenerativeModel
+ path: /api/python/google/generativeai/GenerativeModel
+ - title: configure
+ path: /api/python/google/generativeai/configure
+ - title: create_tuned_model
+ path: /api/python/google/generativeai/create_tuned_model
+ - title: delete_file
+ path: /api/python/google/generativeai/delete_file
+ - title: delete_tuned_model
+ path: /api/python/google/generativeai/delete_tuned_model
+ - title: embed_content
+ path: /api/python/google/generativeai/embed_content
+ - title: embed_content_async
+ path: /api/python/google/generativeai/embed_content_async
+ - title: get_base_model
+ path: /api/python/google/generativeai/get_base_model
+ - title: get_file
+ path: /api/python/google/generativeai/get_file
+ - title: get_model
+ path: /api/python/google/generativeai/get_model
+ - title: get_operation
+ path: /api/python/google/generativeai/get_operation
+ - title: get_tuned_model
+ path: /api/python/google/generativeai/get_tuned_model
+ - title: list_files
+ path: /api/python/google/generativeai/list_files
+ - title: list_models
+ path: /api/python/google/generativeai/list_models
+ - title: list_operations
+ path: /api/python/google/generativeai/list_operations
+ - title: list_tuned_models
+ path: /api/python/google/generativeai/list_tuned_models
+ - title: update_tuned_model
+ path: /api/python/google/generativeai/update_tuned_model
+ - title: upload_file
+ path: /api/python/google/generativeai/upload_file
+ - title: caching
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai/caching
+ - title: CachedContent
+ path: /api/python/google/generativeai/caching/CachedContent
+ - title: get_default_cache_client
+ path: /api/python/google/generativeai/caching/get_default_cache_client
+ - title: protos
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai/protos
+ - title: AttributionSourceId
+ path: /api/python/google/generativeai/protos/AttributionSourceId
+ - title: AttributionSourceId.GroundingPassageId
+ path: /api/python/google/generativeai/protos/AttributionSourceId/GroundingPassageId
+ - title: AttributionSourceId.SemanticRetrieverChunk
+ path: /api/python/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk
+ - title: BatchCreateChunksRequest
+ path: /api/python/google/generativeai/protos/BatchCreateChunksRequest
+ - title: BatchCreateChunksResponse
+ path: /api/python/google/generativeai/protos/BatchCreateChunksResponse
+ - title: BatchDeleteChunksRequest
+ path: /api/python/google/generativeai/protos/BatchDeleteChunksRequest
+ - title: BatchEmbedContentsRequest
+ path: /api/python/google/generativeai/protos/BatchEmbedContentsRequest
+ - title: BatchEmbedContentsResponse
+ path: /api/python/google/generativeai/protos/BatchEmbedContentsResponse
+ - title: BatchEmbedTextRequest
+ path: /api/python/google/generativeai/protos/BatchEmbedTextRequest
+ - title: BatchEmbedTextResponse
+ path: /api/python/google/generativeai/protos/BatchEmbedTextResponse
+ - title: BatchUpdateChunksRequest
+ path: /api/python/google/generativeai/protos/BatchUpdateChunksRequest
+ - title: BatchUpdateChunksResponse
+ path: /api/python/google/generativeai/protos/BatchUpdateChunksResponse
+ - title: Blob
+ path: /api/python/google/generativeai/protos/Blob
+ - title: CachedContent
+ path: /api/python/google/generativeai/protos/CachedContent
+ - title: CachedContent.UsageMetadata
+ path: /api/python/google/generativeai/protos/CachedContent/UsageMetadata
+ - title: Candidate
+ path: /api/python/google/generativeai/protos/Candidate
+ - title: Candidate.FinishReason
+ path: /api/python/google/generativeai/protos/Candidate/FinishReason
+ - title: Chunk
+ path: /api/python/google/generativeai/protos/Chunk
+ - title: Chunk.State
+ path: /api/python/google/generativeai/protos/Chunk/State
+ - title: ChunkData
+ path: /api/python/google/generativeai/protos/ChunkData
+ - title: CitationMetadata
+ path: /api/python/google/generativeai/protos/CitationMetadata
+ - title: CitationSource
+ path: /api/python/google/generativeai/protos/CitationSource
+ - title: CodeExecution
+ path: /api/python/google/generativeai/protos/CodeExecution
+ - title: CodeExecutionResult
+ path: /api/python/google/generativeai/protos/CodeExecutionResult
+ - title: CodeExecutionResult.Outcome
+ path: /api/python/google/generativeai/protos/CodeExecutionResult/Outcome
+ - title: Condition
+ path: /api/python/google/generativeai/protos/Condition
+ - title: Condition.Operator
+ path: /api/python/google/generativeai/protos/Condition/Operator
+ - title: Content
+ path: /api/python/google/generativeai/protos/Content
+ - title: ContentEmbedding
+ path: /api/python/google/generativeai/protos/ContentEmbedding
+ - title: ContentFilter
+ path: /api/python/google/generativeai/protos/ContentFilter
+ - title: Corpus
+ path: /api/python/google/generativeai/protos/Corpus
+ - title: CountMessageTokensRequest
+ path: /api/python/google/generativeai/protos/CountMessageTokensRequest
+ - title: CountMessageTokensResponse
+ path: /api/python/google/generativeai/protos/CountMessageTokensResponse
+ - title: CountTextTokensRequest
+ path: /api/python/google/generativeai/protos/CountTextTokensRequest
+ - title: CountTextTokensResponse
+ path: /api/python/google/generativeai/protos/CountTextTokensResponse
+ - title: CountTokensRequest
+ path: /api/python/google/generativeai/protos/CountTokensRequest
+ - title: CountTokensResponse
+ path: /api/python/google/generativeai/protos/CountTokensResponse
+ - title: CreateCachedContentRequest
+ path: /api/python/google/generativeai/protos/CreateCachedContentRequest
+ - title: CreateChunkRequest
+ path: /api/python/google/generativeai/protos/CreateChunkRequest
+ - title: CreateCorpusRequest
+ path: /api/python/google/generativeai/protos/CreateCorpusRequest
+ - title: CreateDocumentRequest
+ path: /api/python/google/generativeai/protos/CreateDocumentRequest
+ - title: CreateFileRequest
+ path: /api/python/google/generativeai/protos/CreateFileRequest
+ - title: CreateFileResponse
+ path: /api/python/google/generativeai/protos/CreateFileResponse
+ - title: CreatePermissionRequest
+ path: /api/python/google/generativeai/protos/CreatePermissionRequest
+ - title: CreateTunedModelMetadata
+ path: /api/python/google/generativeai/protos/CreateTunedModelMetadata
+ - title: CreateTunedModelRequest
+ path: /api/python/google/generativeai/protos/CreateTunedModelRequest
+ - title: CustomMetadata
+ path: /api/python/google/generativeai/protos/CustomMetadata
+ - title: Dataset
+ path: /api/python/google/generativeai/protos/Dataset
+ - title: DeleteCachedContentRequest
+ path: /api/python/google/generativeai/protos/DeleteCachedContentRequest
+ - title: DeleteChunkRequest
+ path: /api/python/google/generativeai/protos/DeleteChunkRequest
+ - title: DeleteCorpusRequest
+ path: /api/python/google/generativeai/protos/DeleteCorpusRequest
+ - title: DeleteDocumentRequest
+ path: /api/python/google/generativeai/protos/DeleteDocumentRequest
+ - title: DeleteFileRequest
+ path: /api/python/google/generativeai/protos/DeleteFileRequest
+ - title: DeletePermissionRequest
+ path: /api/python/google/generativeai/protos/DeletePermissionRequest
+ - title: DeleteTunedModelRequest
+ path: /api/python/google/generativeai/protos/DeleteTunedModelRequest
+ - title: Document
+ path: /api/python/google/generativeai/protos/Document
+ - title: DynamicRetrievalConfig
+ path: /api/python/google/generativeai/protos/DynamicRetrievalConfig
+ - title: DynamicRetrievalConfig.Mode
+ path: /api/python/google/generativeai/protos/DynamicRetrievalConfig/Mode
+ - title: EmbedContentRequest
+ path: /api/python/google/generativeai/protos/EmbedContentRequest
+ - title: EmbedContentResponse
+ path: /api/python/google/generativeai/protos/EmbedContentResponse
+ - title: EmbedTextRequest
+ path: /api/python/google/generativeai/protos/EmbedTextRequest
+ - title: EmbedTextResponse
+ path: /api/python/google/generativeai/protos/EmbedTextResponse
+ - title: Embedding
+ path: /api/python/google/generativeai/protos/Embedding
+ - title: Example
+ path: /api/python/google/generativeai/protos/Example
+ - title: ExecutableCode
+ path: /api/python/google/generativeai/protos/ExecutableCode
+ - title: ExecutableCode.Language
+ path: /api/python/google/generativeai/protos/ExecutableCode/Language
+ - title: File
+ path: /api/python/google/generativeai/protos/File
+ - title: File.State
+ path: /api/python/google/generativeai/protos/File/State
+ - title: FileData
+ path: /api/python/google/generativeai/protos/FileData
+ - title: FunctionCall
+ path: /api/python/google/generativeai/protos/FunctionCall
+ - title: FunctionCallingConfig
+ path: /api/python/google/generativeai/protos/FunctionCallingConfig
+ - title: FunctionCallingConfig.Mode
+ path: /api/python/google/generativeai/protos/FunctionCallingConfig/Mode
+ - title: FunctionDeclaration
+ path: /api/python/google/generativeai/protos/FunctionDeclaration
+ - title: FunctionResponse
+ path: /api/python/google/generativeai/protos/FunctionResponse
+ - title: GenerateAnswerRequest
+ path: /api/python/google/generativeai/protos/GenerateAnswerRequest
+ - title: GenerateAnswerRequest.AnswerStyle
+ path: /api/python/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle
+ - title: GenerateAnswerResponse
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse
+ - title: GenerateAnswerResponse.InputFeedback
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback
+ - title: GenerateAnswerResponse.InputFeedback.BlockReason
+ path: /api/python/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason
+ - title: GenerateContentRequest
+ path: /api/python/google/generativeai/protos/GenerateContentRequest
+ - title: GenerateContentResponse
+ path: /api/python/google/generativeai/protos/GenerateContentResponse
+ - title: GenerateContentResponse.PromptFeedback
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback
+ - title: GenerateContentResponse.PromptFeedback.BlockReason
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason
+ - title: GenerateContentResponse.UsageMetadata
+ path: /api/python/google/generativeai/protos/GenerateContentResponse/UsageMetadata
+ - title: GenerateMessageRequest
+ path: /api/python/google/generativeai/protos/GenerateMessageRequest
+ - title: GenerateMessageResponse
+ path: /api/python/google/generativeai/protos/GenerateMessageResponse
+ - title: GenerateTextRequest
+ path: /api/python/google/generativeai/protos/GenerateTextRequest
+ - title: GenerateTextResponse
+ path: /api/python/google/generativeai/protos/GenerateTextResponse
+ - title: GenerationConfig
+ path: /api/python/google/generativeai/protos/GenerationConfig
+ - title: GetCachedContentRequest
+ path: /api/python/google/generativeai/protos/GetCachedContentRequest
+ - title: GetChunkRequest
+ path: /api/python/google/generativeai/protos/GetChunkRequest
+ - title: GetCorpusRequest
+ path: /api/python/google/generativeai/protos/GetCorpusRequest
+ - title: GetDocumentRequest
+ path: /api/python/google/generativeai/protos/GetDocumentRequest
+ - title: GetFileRequest
+ path: /api/python/google/generativeai/protos/GetFileRequest
+ - title: GetModelRequest
+ path: /api/python/google/generativeai/protos/GetModelRequest
+ - title: GetPermissionRequest
+ path: /api/python/google/generativeai/protos/GetPermissionRequest
+ - title: GetTunedModelRequest
+ path: /api/python/google/generativeai/protos/GetTunedModelRequest
+ - title: GoogleSearchRetrieval
+ path: /api/python/google/generativeai/protos/GoogleSearchRetrieval
+ - title: GroundingAttribution
+ path: /api/python/google/generativeai/protos/GroundingAttribution
+ - title: GroundingChunk
+ path: /api/python/google/generativeai/protos/GroundingChunk
+ - title: GroundingChunk.Web
+ path: /api/python/google/generativeai/protos/GroundingChunk/Web
+ - title: GroundingMetadata
+ path: /api/python/google/generativeai/protos/GroundingMetadata
+ - title: GroundingPassage
+ path: /api/python/google/generativeai/protos/GroundingPassage
+ - title: GroundingPassages
+ path: /api/python/google/generativeai/protos/GroundingPassages
+ - title: GroundingSupport
+ path: /api/python/google/generativeai/protos/GroundingSupport
+ - title: HarmCategory
+ path: /api/python/google/generativeai/protos/HarmCategory
+ - title: Hyperparameters
+ path: /api/python/google/generativeai/protos/Hyperparameters
+ - title: ListCachedContentsRequest
+ path: /api/python/google/generativeai/protos/ListCachedContentsRequest
+ - title: ListCachedContentsResponse
+ path: /api/python/google/generativeai/protos/ListCachedContentsResponse
+ - title: ListChunksRequest
+ path: /api/python/google/generativeai/protos/ListChunksRequest
+ - title: ListChunksResponse
+ path: /api/python/google/generativeai/protos/ListChunksResponse
+ - title: ListCorporaRequest
+ path: /api/python/google/generativeai/protos/ListCorporaRequest
+ - title: ListCorporaResponse
+ path: /api/python/google/generativeai/protos/ListCorporaResponse
+ - title: ListDocumentsRequest
+ path: /api/python/google/generativeai/protos/ListDocumentsRequest
+ - title: ListDocumentsResponse
+ path: /api/python/google/generativeai/protos/ListDocumentsResponse
+ - title: ListFilesRequest
+ path: /api/python/google/generativeai/protos/ListFilesRequest
+ - title: ListFilesResponse
+ path: /api/python/google/generativeai/protos/ListFilesResponse
+ - title: ListModelsRequest
+ path: /api/python/google/generativeai/protos/ListModelsRequest
+ - title: ListModelsResponse
+ path: /api/python/google/generativeai/protos/ListModelsResponse
+ - title: ListPermissionsRequest
+ path: /api/python/google/generativeai/protos/ListPermissionsRequest
+ - title: ListPermissionsResponse
+ path: /api/python/google/generativeai/protos/ListPermissionsResponse
+ - title: ListTunedModelsRequest
+ path: /api/python/google/generativeai/protos/ListTunedModelsRequest
+ - title: ListTunedModelsResponse
+ path: /api/python/google/generativeai/protos/ListTunedModelsResponse
+ - title: LogprobsResult
+ path: /api/python/google/generativeai/protos/LogprobsResult
+ - title: LogprobsResult.Candidate
+ path: /api/python/google/generativeai/protos/LogprobsResult/Candidate
+ - title: LogprobsResult.TopCandidates
+ path: /api/python/google/generativeai/protos/LogprobsResult/TopCandidates
+ - title: Message
+ path: /api/python/google/generativeai/protos/Message
+ - title: MessagePrompt
+ path: /api/python/google/generativeai/protos/MessagePrompt
+ - title: MetadataFilter
+ path: /api/python/google/generativeai/protos/MetadataFilter
+ - title: Model
+ path: /api/python/google/generativeai/protos/Model
+ - title: Part
+ path: /api/python/google/generativeai/protos/Part
+ - title: Permission
+ path: /api/python/google/generativeai/protos/Permission
+ - title: Permission.GranteeType
+ path: /api/python/google/generativeai/protos/Permission/GranteeType
+ - title: Permission.Role
+ path: /api/python/google/generativeai/protos/Permission/Role
+ - title: PredictRequest
+ path: /api/python/google/generativeai/protos/PredictRequest
+ - title: PredictResponse
+ path: /api/python/google/generativeai/protos/PredictResponse
+ - title: QueryCorpusRequest
+ path: /api/python/google/generativeai/protos/QueryCorpusRequest
+ - title: QueryCorpusResponse
+ path: /api/python/google/generativeai/protos/QueryCorpusResponse
+ - title: QueryDocumentRequest
+ path: /api/python/google/generativeai/protos/QueryDocumentRequest
+ - title: QueryDocumentResponse
+ path: /api/python/google/generativeai/protos/QueryDocumentResponse
+ - title: RelevantChunk
+ path: /api/python/google/generativeai/protos/RelevantChunk
+ - title: RetrievalMetadata
+ path: /api/python/google/generativeai/protos/RetrievalMetadata
+ - title: SafetyFeedback
+ path: /api/python/google/generativeai/protos/SafetyFeedback
+ - title: SafetyRating
+ path: /api/python/google/generativeai/protos/SafetyRating
+ - title: SafetySetting
+ path: /api/python/google/generativeai/protos/SafetySetting
+ - title: Schema
+ path: /api/python/google/generativeai/protos/Schema
+ - title: Schema.PropertiesEntry
+ path: /api/python/google/generativeai/protos/Schema/PropertiesEntry
+ - title: SearchEntryPoint
+ path: /api/python/google/generativeai/protos/SearchEntryPoint
+ - title: Segment
+ path: /api/python/google/generativeai/protos/Segment
+ - title: SemanticRetrieverConfig
+ path: /api/python/google/generativeai/protos/SemanticRetrieverConfig
+ - title: StringList
+ path: /api/python/google/generativeai/protos/StringList
+ - title: TaskType
+ path: /api/python/google/generativeai/protos/TaskType
+ - title: TextCompletion
+ path: /api/python/google/generativeai/protos/TextCompletion
+ - title: TextPrompt
+ path: /api/python/google/generativeai/protos/TextPrompt
+ - title: Tool
+ path: /api/python/google/generativeai/protos/Tool
+ - title: ToolConfig
+ path: /api/python/google/generativeai/protos/ToolConfig
+ - title: TransferOwnershipRequest
+ path: /api/python/google/generativeai/protos/TransferOwnershipRequest
+ - title: TransferOwnershipResponse
+ path: /api/python/google/generativeai/protos/TransferOwnershipResponse
+ - title: TunedModel
+ path: /api/python/google/generativeai/protos/TunedModel
+ - title: TunedModelSource
+ path: /api/python/google/generativeai/protos/TunedModelSource
+ - title: TuningExample
+ path: /api/python/google/generativeai/protos/TuningExample
+ - title: TuningExamples
+ path: /api/python/google/generativeai/protos/TuningExamples
+ - title: TuningSnapshot
+ path: /api/python/google/generativeai/protos/TuningSnapshot
+ - title: TuningTask
+ path: /api/python/google/generativeai/protos/TuningTask
+ - title: Type
+ path: /api/python/google/generativeai/protos/Type
+ - title: UpdateCachedContentRequest
+ path: /api/python/google/generativeai/protos/UpdateCachedContentRequest
+ - title: UpdateChunkRequest
+ path: /api/python/google/generativeai/protos/UpdateChunkRequest
+ - title: UpdateCorpusRequest
+ path: /api/python/google/generativeai/protos/UpdateCorpusRequest
+ - title: UpdateDocumentRequest
+ path: /api/python/google/generativeai/protos/UpdateDocumentRequest
+ - title: UpdatePermissionRequest
+ path: /api/python/google/generativeai/protos/UpdatePermissionRequest
+ - title: UpdateTunedModelRequest
+ path: /api/python/google/generativeai/protos/UpdateTunedModelRequest
+ - title: VideoMetadata
+ path: /api/python/google/generativeai/protos/VideoMetadata
+ - title: types
+ section:
+ - title: Overview
+ path: /api/python/google/generativeai/types
+ - title: AnyModelNameOptions
+ path: /api/python/google/generativeai/types/AnyModelNameOptions
+ - title: AsyncGenerateContentResponse
+ path: /api/python/google/generativeai/types/AsyncGenerateContentResponse
+ - title: BaseModelNameOptions
+ path: /api/python/google/generativeai/types/BaseModelNameOptions
+ - title: BlobDict
+ path: /api/python/google/generativeai/types/BlobDict
+ - title: BlobType
+ path: /api/python/google/generativeai/types/BlobType
+ - title: BlockedPromptException
+ path: /api/python/google/generativeai/types/BlockedPromptException
+ - title: BlockedReason
+ path: /api/python/google/generativeai/types/BlockedReason
+ - title: BrokenResponseError
+ path: /api/python/google/generativeai/types/BrokenResponseError
+ - title: CallableFunctionDeclaration
+ path: /api/python/google/generativeai/types/CallableFunctionDeclaration
+ - title: CitationMetadataDict
+ path: /api/python/google/generativeai/types/CitationMetadataDict
+ - title: CitationSourceDict
+ path: /api/python/google/generativeai/types/CitationSourceDict
+ - title: ContentDict
+ path: /api/python/google/generativeai/types/ContentDict
+ - title: ContentFilterDict
+ path: /api/python/google/generativeai/types/ContentFilterDict
+ - title: ContentType
+ path: /api/python/google/generativeai/types/ContentType
+ - title: ContentsType
+ path: /api/python/google/generativeai/types/ContentsType
+ - title: File
+ path: /api/python/google/generativeai/types/File
+ - title: FileDataDict
+ path: /api/python/google/generativeai/types/FileDataDict
+ - title: FileDataType
+ path: /api/python/google/generativeai/types/FileDataType
+ - title: FunctionDeclaration
+ path: /api/python/google/generativeai/types/FunctionDeclaration
+ - title: FunctionDeclarationType
+ path: /api/python/google/generativeai/types/FunctionDeclarationType
+ - title: FunctionLibrary
+ path: /api/python/google/generativeai/types/FunctionLibrary
+ - title: FunctionLibraryType
+ path: /api/python/google/generativeai/types/FunctionLibraryType
+ - title: GenerateContentResponse
+ path: /api/python/google/generativeai/types/GenerateContentResponse
+ - title: GenerationConfig
+ path: /api/python/google/generativeai/types/GenerationConfig
+ - title: GenerationConfigDict
+ path: /api/python/google/generativeai/types/GenerationConfigDict
+ - title: GenerationConfigType
+ path: /api/python/google/generativeai/types/GenerationConfigType
+ - title: HarmBlockThreshold
+ path: /api/python/google/generativeai/types/HarmBlockThreshold
+ - title: HarmCategory
+ path: /api/python/google/generativeai/types/HarmCategory
+ - title: HarmProbability
+ path: /api/python/google/generativeai/types/HarmProbability
+ - title: IncompleteIterationError
+ path: /api/python/google/generativeai/types/IncompleteIterationError
+ - title: Model
+ path: /api/python/google/generativeai/types/Model
+ - title: ModelsIterable
+ path: /api/python/google/generativeai/types/ModelsIterable
+ - title: PartDict
+ path: /api/python/google/generativeai/types/PartDict
+ - title: PartType
+ path: /api/python/google/generativeai/types/PartType
+ - title: Permission
+ path: /api/python/google/generativeai/types/Permission
+ - title: Permissions
+ path: /api/python/google/generativeai/types/Permissions
+ - title: RequestOptions
+ path: /api/python/google/generativeai/types/RequestOptions
+ - title: RequestOptionsType
+ path: /api/python/google/generativeai/types/RequestOptionsType
+ - title: SafetyFeedbackDict
+ path: /api/python/google/generativeai/types/SafetyFeedbackDict
+ - title: SafetyRatingDict
+ path: /api/python/google/generativeai/types/SafetyRatingDict
+ - title: SafetySettingDict
+ path: /api/python/google/generativeai/types/SafetySettingDict
+ - title: Status
+ path: /api/python/google/generativeai/types/Status
+ - title: StopCandidateException
+ path: /api/python/google/generativeai/types/StopCandidateException
+ - title: StrictContentType
+ path: /api/python/google/generativeai/types/StrictContentType
+ - title: Tool
+ path: /api/python/google/generativeai/types/Tool
+ - title: ToolDict
+ path: /api/python/google/generativeai/types/ToolDict
+ - title: ToolsType
+ path: /api/python/google/generativeai/types/ToolsType
+ - title: TunedModel
+ path: /api/python/google/generativeai/types/TunedModel
+ - title: TunedModelNameOptions
+ path: /api/python/google/generativeai/types/TunedModelNameOptions
+ - title: TunedModelState
+ path: /api/python/google/generativeai/types/TunedModelState
+ - title: TypedDict
+ path: /api/python/google/generativeai/types/TypedDict
+ - title: get_default_file_client
+ path: /api/python/google/generativeai/types/get_default_file_client
+ - title: to_file_data
+ path: /api/python/google/generativeai/types/to_file_data
diff --git a/docs/api/google/generativeai/all_symbols.md b/docs/api/google/generativeai/all_symbols.md
new file mode 100644
index 000000000..bc673a13e
--- /dev/null
+++ b/docs/api/google/generativeai/all_symbols.md
@@ -0,0 +1,261 @@
+# All symbols in Generative AI - Python
+
+
+
+## Primary symbols
+* google.generativeai
+* google.generativeai.ChatSession
+* google.generativeai.GenerationConfig
+* google.generativeai.GenerativeModel
+* google.generativeai.caching
+* google.generativeai.caching.CachedContent
+* google.generativeai.caching.get_default_cache_client
+* google.generativeai.configure
+* google.generativeai.create_tuned_model
+* google.generativeai.delete_file
+* google.generativeai.delete_tuned_model
+* google.generativeai.embed_content
+* google.generativeai.embed_content_async
+* google.generativeai.get_base_model
+* google.generativeai.get_file
+* google.generativeai.get_model
+* google.generativeai.get_operation
+* google.generativeai.get_tuned_model
+* google.generativeai.list_files
+* google.generativeai.list_models
+* google.generativeai.list_operations
+* google.generativeai.list_tuned_models
+* google.generativeai.protos
+* google.generativeai.protos.AttributionSourceId
+* google.generativeai.protos.AttributionSourceId.GroundingPassageId
+* google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
+* google.generativeai.protos.BatchCreateChunksRequest
+* google.generativeai.protos.BatchCreateChunksResponse
+* google.generativeai.protos.BatchDeleteChunksRequest
+* google.generativeai.protos.BatchEmbedContentsRequest
+* google.generativeai.protos.BatchEmbedContentsResponse
+* google.generativeai.protos.BatchEmbedTextRequest
+* google.generativeai.protos.BatchEmbedTextResponse
+* google.generativeai.protos.BatchUpdateChunksRequest
+* google.generativeai.protos.BatchUpdateChunksResponse
+* google.generativeai.protos.Blob
+* google.generativeai.protos.CachedContent
+* google.generativeai.protos.CachedContent.UsageMetadata
+* google.generativeai.protos.Candidate
+* google.generativeai.protos.Candidate.FinishReason
+* google.generativeai.protos.Chunk
+* google.generativeai.protos.Chunk.State
+* google.generativeai.protos.ChunkData
+* google.generativeai.protos.CitationMetadata
+* google.generativeai.protos.CitationSource
+* google.generativeai.protos.CodeExecution
+* google.generativeai.protos.CodeExecutionResult
+* google.generativeai.protos.CodeExecutionResult.Outcome
+* google.generativeai.protos.Condition
+* google.generativeai.protos.Condition.Operator
+* google.generativeai.protos.Content
+* google.generativeai.protos.ContentEmbedding
+* google.generativeai.protos.ContentFilter
+* google.generativeai.protos.ContentFilter.BlockedReason
+* google.generativeai.protos.Corpus
+* google.generativeai.protos.CountMessageTokensRequest
+* google.generativeai.protos.CountMessageTokensResponse
+* google.generativeai.protos.CountTextTokensRequest
+* google.generativeai.protos.CountTextTokensResponse
+* google.generativeai.protos.CountTokensRequest
+* google.generativeai.protos.CountTokensResponse
+* google.generativeai.protos.CreateCachedContentRequest
+* google.generativeai.protos.CreateChunkRequest
+* google.generativeai.protos.CreateCorpusRequest
+* google.generativeai.protos.CreateDocumentRequest
+* google.generativeai.protos.CreateFileRequest
+* google.generativeai.protos.CreateFileResponse
+* google.generativeai.protos.CreatePermissionRequest
+* google.generativeai.protos.CreateTunedModelMetadata
+* google.generativeai.protos.CreateTunedModelRequest
+* google.generativeai.protos.CustomMetadata
+* google.generativeai.protos.Dataset
+* google.generativeai.protos.DeleteCachedContentRequest
+* google.generativeai.protos.DeleteChunkRequest
+* google.generativeai.protos.DeleteCorpusRequest
+* google.generativeai.protos.DeleteDocumentRequest
+* google.generativeai.protos.DeleteFileRequest
+* google.generativeai.protos.DeletePermissionRequest
+* google.generativeai.protos.DeleteTunedModelRequest
+* google.generativeai.protos.Document
+* google.generativeai.protos.DynamicRetrievalConfig
+* google.generativeai.protos.DynamicRetrievalConfig.Mode
+* google.generativeai.protos.EmbedContentRequest
+* google.generativeai.protos.EmbedContentResponse
+* google.generativeai.protos.EmbedTextRequest
+* google.generativeai.protos.EmbedTextResponse
+* google.generativeai.protos.Embedding
+* google.generativeai.protos.Example
+* google.generativeai.protos.ExecutableCode
+* google.generativeai.protos.ExecutableCode.Language
+* google.generativeai.protos.File
+* google.generativeai.protos.File.State
+* google.generativeai.protos.FileData
+* google.generativeai.protos.FunctionCall
+* google.generativeai.protos.FunctionCallingConfig
+* google.generativeai.protos.FunctionCallingConfig.Mode
+* google.generativeai.protos.FunctionDeclaration
+* google.generativeai.protos.FunctionResponse
+* google.generativeai.protos.GenerateAnswerRequest
+* google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
+* google.generativeai.protos.GenerateAnswerResponse
+* google.generativeai.protos.GenerateAnswerResponse.InputFeedback
+* google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
+* google.generativeai.protos.GenerateContentRequest
+* google.generativeai.protos.GenerateContentResponse
+* google.generativeai.protos.GenerateContentResponse.PromptFeedback
+* google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
+* google.generativeai.protos.GenerateContentResponse.UsageMetadata
+* google.generativeai.protos.GenerateMessageRequest
+* google.generativeai.protos.GenerateMessageResponse
+* google.generativeai.protos.GenerateTextRequest
+* google.generativeai.protos.GenerateTextResponse
+* google.generativeai.protos.GenerationConfig
+* google.generativeai.protos.GetCachedContentRequest
+* google.generativeai.protos.GetChunkRequest
+* google.generativeai.protos.GetCorpusRequest
+* google.generativeai.protos.GetDocumentRequest
+* google.generativeai.protos.GetFileRequest
+* google.generativeai.protos.GetModelRequest
+* google.generativeai.protos.GetPermissionRequest
+* google.generativeai.protos.GetTunedModelRequest
+* google.generativeai.protos.GoogleSearchRetrieval
+* google.generativeai.protos.GroundingAttribution
+* google.generativeai.protos.GroundingChunk
+* google.generativeai.protos.GroundingChunk.Web
+* google.generativeai.protos.GroundingMetadata
+* google.generativeai.protos.GroundingPassage
+* google.generativeai.protos.GroundingPassages
+* google.generativeai.protos.GroundingSupport
+* google.generativeai.protos.HarmCategory
+* google.generativeai.protos.Hyperparameters
+* google.generativeai.protos.ListCachedContentsRequest
+* google.generativeai.protos.ListCachedContentsResponse
+* google.generativeai.protos.ListChunksRequest
+* google.generativeai.protos.ListChunksResponse
+* google.generativeai.protos.ListCorporaRequest
+* google.generativeai.protos.ListCorporaResponse
+* google.generativeai.protos.ListDocumentsRequest
+* google.generativeai.protos.ListDocumentsResponse
+* google.generativeai.protos.ListFilesRequest
+* google.generativeai.protos.ListFilesResponse
+* google.generativeai.protos.ListModelsRequest
+* google.generativeai.protos.ListModelsResponse
+* google.generativeai.protos.ListPermissionsRequest
+* google.generativeai.protos.ListPermissionsResponse
+* google.generativeai.protos.ListTunedModelsRequest
+* google.generativeai.protos.ListTunedModelsResponse
+* google.generativeai.protos.LogprobsResult
+* google.generativeai.protos.LogprobsResult.Candidate
+* google.generativeai.protos.LogprobsResult.TopCandidates
+* google.generativeai.protos.Message
+* google.generativeai.protos.MessagePrompt
+* google.generativeai.protos.MetadataFilter
+* google.generativeai.protos.Model
+* google.generativeai.protos.Part
+* google.generativeai.protos.Permission
+* google.generativeai.protos.Permission.GranteeType
+* google.generativeai.protos.Permission.Role
+* google.generativeai.protos.PredictRequest
+* google.generativeai.protos.PredictResponse
+* google.generativeai.protos.QueryCorpusRequest
+* google.generativeai.protos.QueryCorpusResponse
+* google.generativeai.protos.QueryDocumentRequest
+* google.generativeai.protos.QueryDocumentResponse
+* google.generativeai.protos.RelevantChunk
+* google.generativeai.protos.RetrievalMetadata
+* google.generativeai.protos.SafetyFeedback
+* google.generativeai.protos.SafetyRating
+* google.generativeai.protos.SafetyRating.HarmProbability
+* google.generativeai.protos.SafetySetting
+* google.generativeai.protos.SafetySetting.HarmBlockThreshold
+* google.generativeai.protos.Schema
+* google.generativeai.protos.Schema.PropertiesEntry
+* google.generativeai.protos.SearchEntryPoint
+* google.generativeai.protos.Segment
+* google.generativeai.protos.SemanticRetrieverConfig
+* google.generativeai.protos.StringList
+* google.generativeai.protos.TaskType
+* google.generativeai.protos.TextCompletion
+* google.generativeai.protos.TextPrompt
+* google.generativeai.protos.Tool
+* google.generativeai.protos.ToolConfig
+* google.generativeai.protos.TransferOwnershipRequest
+* google.generativeai.protos.TransferOwnershipResponse
+* google.generativeai.protos.TunedModel
+* google.generativeai.protos.TunedModel.State
+* google.generativeai.protos.TunedModelSource
+* google.generativeai.protos.TuningExample
+* google.generativeai.protos.TuningExamples
+* google.generativeai.protos.TuningSnapshot
+* google.generativeai.protos.TuningTask
+* google.generativeai.protos.Type
+* google.generativeai.protos.UpdateCachedContentRequest
+* google.generativeai.protos.UpdateChunkRequest
+* google.generativeai.protos.UpdateCorpusRequest
+* google.generativeai.protos.UpdateDocumentRequest
+* google.generativeai.protos.UpdatePermissionRequest
+* google.generativeai.protos.UpdateTunedModelRequest
+* google.generativeai.protos.VideoMetadata
+* google.generativeai.types
+* google.generativeai.types.AnyModelNameOptions
+* google.generativeai.types.AsyncGenerateContentResponse
+* google.generativeai.types.BaseModelNameOptions
+* google.generativeai.types.BlobDict
+* google.generativeai.types.BlobType
+* google.generativeai.types.BlockedPromptException
+* google.generativeai.types.BlockedReason
+* google.generativeai.types.BrokenResponseError
+* google.generativeai.types.CallableFunctionDeclaration
+* google.generativeai.types.CitationMetadataDict
+* google.generativeai.types.CitationSourceDict
+* google.generativeai.types.ContentDict
+* google.generativeai.types.ContentFilterDict
+* google.generativeai.types.ContentType
+* google.generativeai.types.ContentsType
+* google.generativeai.types.File
+* google.generativeai.types.FileDataDict
+* google.generativeai.types.FileDataType
+* google.generativeai.types.FunctionDeclaration
+* google.generativeai.types.FunctionDeclarationType
+* google.generativeai.types.FunctionLibrary
+* google.generativeai.types.FunctionLibraryType
+* google.generativeai.types.GenerateContentResponse
+* google.generativeai.types.GenerationConfig
+* google.generativeai.types.GenerationConfigDict
+* google.generativeai.types.GenerationConfigType
+* google.generativeai.types.HarmBlockThreshold
+* google.generativeai.types.HarmCategory
+* google.generativeai.types.HarmProbability
+* google.generativeai.types.IncompleteIterationError
+* google.generativeai.types.Model
+* google.generativeai.types.ModelNameOptions
+* google.generativeai.types.ModelsIterable
+* google.generativeai.types.PartDict
+* google.generativeai.types.PartType
+* google.generativeai.types.Permission
+* google.generativeai.types.Permissions
+* google.generativeai.types.RequestOptions
+* google.generativeai.types.RequestOptionsType
+* google.generativeai.types.SafetyFeedbackDict
+* google.generativeai.types.SafetyRatingDict
+* google.generativeai.types.SafetySettingDict
+* google.generativeai.types.Status
+* google.generativeai.types.StopCandidateException
+* google.generativeai.types.StrictContentType
+* google.generativeai.types.Tool
+* google.generativeai.types.ToolDict
+* google.generativeai.types.ToolsType
+* google.generativeai.types.TunedModel
+* google.generativeai.types.TunedModelNameOptions
+* google.generativeai.types.TunedModelState
+* google.generativeai.types.TypedDict
+* google.generativeai.types.get_default_file_client
+* google.generativeai.types.to_file_data
+* google.generativeai.update_tuned_model
+* google.generativeai.upload_file
\ No newline at end of file
diff --git a/docs/api/google/generativeai/api_report.pb b/docs/api/google/generativeai/api_report.pb
new file mode 100644
index 000000000..bd0f3970f
Binary files /dev/null and b/docs/api/google/generativeai/api_report.pb differ
diff --git a/docs/api/google/generativeai/caching.md b/docs/api/google/generativeai/caching.md
new file mode 100644
index 000000000..784ab1b80
--- /dev/null
+++ b/docs/api/google/generativeai/caching.md
@@ -0,0 +1,49 @@
+
+# Module: google.generativeai.caching
+
+## Classes
+
+[`class CachedContent`](../../google/generativeai/caching/CachedContent.md): Cached content resource.
+
+## Functions
+
+[`get_default_cache_client(...)`](../../google/generativeai/caching/get_default_cache_client.md)
+
+## Other Members
+
+| Member | Value |
+| --- | --- |
+| `annotations` | Instance of `__future__._Feature` |
+
diff --git a/docs/api/google/generativeai/caching/CachedContent.md b/docs/api/google/generativeai/caching/CachedContent.md
new file mode 100644
index 000000000..a0efa473d
--- /dev/null
+++ b/docs/api/google/generativeai/caching/CachedContent.md
@@ -0,0 +1,448 @@
+
+# google.generativeai.caching.CachedContent
+
+
+
+
+
+
+
+Cached content resource.
+
+
+google.generativeai.caching.CachedContent(
+ name
+)
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The resource name referring to the cached content.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`create_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+
+
+ |
+
+
+
+`expire_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`model`
+
+ |
+
+
+
+
+ |
+
+
+
+`name`
+
+ |
+
+
+
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`usage_metadata`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+create
+
+View source
+
+
+@classmethod
+create(
+ model: str,
+ *,
+ display_name: (str | None) = None,
+ system_instruction: Optional[content_types.ContentType] = None,
+ contents: Optional[content_types.ContentsType] = None,
+ tools: Optional[content_types.FunctionLibraryType] = None,
+ tool_config: Optional[content_types.ToolConfigType] = None,
+ ttl: Optional[caching_types.TTLTypes] = None,
+ expire_time: Optional[caching_types.ExpireTimeTypes] = None
+) -> CachedContent
+
+
+Creates a `CachedContent` resource.
+
+
+
+
+
+Args |
+
+
+
+
+`model`
+
+ |
+
+
+The name of the `model` to use for cached content creation.
+Any `CachedContent` resource can only be used with the
+`model` it was created for.
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+The user-generated meaningful display name
+of the cached content. `display_name` must be no
+more than 128 Unicode characters.
+
+ |
+
+
+
+`system_instruction`
+
+ |
+
+
+Developer-set system instruction.
+
+ |
+
+
+
+`contents`
+
+ |
+
+
+Contents to cache.
+
+ |
+
+
+
+`tools`
+
+ |
+
+
+A list of `Tools` the model may use to generate a response.
+
+ |
+
+
+
+`tool_config`
+
+ |
+
+
+Config to apply to all tools.
+
+ |
+
+
+
+`ttl`
+
+ |
+
+
+TTL for cached resource (in seconds). Defaults to 1 hour.
+`ttl` and `expire_time` are exclusive arguments.
+
+ |
+
+
+
+`expire_time`
+
+ |
+
+
+Expiration time for cached resource.
+`ttl` and `expire_time` are exclusive arguments.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+`CachedContent` resource with specified name.
+
+ |
+
+
+
+
+
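+A minimal usage sketch; the model name, contents, and TTL below are
+illustrative assumptions, and `GenerativeModel.from_cached_content` is
+assumed as the consumption path:
+
+```
+import datetime
+
+import google.generativeai as genai
+from google.generativeai import caching
+
+# Cache a large context once, then reuse it across requests.
+cache = caching.CachedContent.create(
+    model="models/gemini-1.5-flash-001",
+    display_name="my-doc-cache",
+    system_instruction="You answer questions about the cached document.",
+    contents=["<large document text>"],
+    ttl=datetime.timedelta(hours=1),
+)
+model = genai.GenerativeModel.from_cached_content(cached_content=cache)
+```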
+
+delete
+
+View source
+
+
+delete() -> None
+
+
+Deletes the `CachedContent` resource.
+
+
+get
+
+View source
+
+
+@classmethod
+get(
+ name: str
+) -> CachedContent
+
+
+Fetches the required `CachedContent` resource.
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The resource name referring to the cached content.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+`CachedContent` resource with specified `name`.
+
+ |
+
+
+
+
+
+
+list
+
+View source
+
+
+@classmethod
+list(
+ page_size: Optional[int] = 1
+) -> Iterable[CachedContent]
+
+
+Lists `CachedContent` objects associated with the project.
+
+
+
+
+
+Args |
+
+
+
+
+`page_size`
+
+ |
+
+
+The maximum number of `CachedContent` objects to return (per page).
+The service may return fewer `CachedContent` objects.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+A paginated list of `CachedContent` objects.
+
+ |
+
+
+
+
+
+
+update
+
+View source
+
+
+update(
+ *,
+ ttl: Optional[caching_types.TTLTypes] = None,
+ expire_time: Optional[caching_types.ExpireTimeTypes] = None
+) -> None
+
+
+Updates the requested `CachedContent` resource.
+
+
+
+
+
+Args |
+
+
+
+
+`ttl`
+
+ |
+
+
+TTL for cached resource (in seconds). Defaults to 1 hour.
+`ttl` and `expire_time` are exclusive arguments.
+
+ |
+
+
+
+`expire_time`
+
+ |
+
+
+Expiration time for cached resource.
+`ttl` and `expire_time` are exclusive arguments.
+
+ |
+
+
+
+
+
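+For instance, a sketch of extending a cache's lifetime (`cache` is assumed
+to be an existing `CachedContent` instance from `create` or `get`):
+
+```
+import datetime
+
+cache.update(ttl=datetime.timedelta(hours=4))
+```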
+
+
diff --git a/docs/api/google/generativeai/caching/get_default_cache_client.md b/docs/api/google/generativeai/caching/get_default_cache_client.md
new file mode 100644
index 000000000..8457f5c14
--- /dev/null
+++ b/docs/api/google/generativeai/caching/get_default_cache_client.md
@@ -0,0 +1,26 @@
+
+# google.generativeai.caching.get_default_cache_client
+
+
+
+
+
+
+
+
+
+
+
+google.generativeai.caching.get_default_cache_client() -> glm.CacheServiceClient
+
+
+
+
+
diff --git a/docs/api/google/generativeai/configure.md b/docs/api/google/generativeai/configure.md
new file mode 100644
index 000000000..81c9e19be
--- /dev/null
+++ b/docs/api/google/generativeai/configure.md
@@ -0,0 +1,86 @@
+
+# google.generativeai.configure
+
+
+
+
+
+
+
+Captures default client configuration.
+
+
+
+google.generativeai.configure(
+ *,
+ api_key: (str | None) = None,
+ credentials: (ga_credentials.Credentials | dict | None) = None,
+ transport: (str | None) = None,
+ client_options: (client_options_lib.ClientOptions | dict | None) = None,
+ client_info: (gapic_v1.client_info.ClientInfo | None) = None,
+ default_metadata: Sequence[tuple[str, str]] = ()
+)
+
+
+
+
+
+
+If no API key has been provided (either directly, or on `client_options`) and the
+`GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
+
+Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in
+`google.ai.generativelanguage` for details on the other arguments.
+
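+For example, a typical setup sketch (the key value is a placeholder):
+
+```
+import google.generativeai as genai
+
+# If api_key is omitted and GOOGLE_API_KEY is set, the environment
+# variable is used instead.
+genai.configure(api_key="YOUR_API_KEY")
+```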
+
+
+
+Args |
+
+
+
+
+`transport`
+
+ |
+
+
+A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
+
+ |
+
+
+
+`api_key`
+
+ |
+
+
+The API key to use when creating the default clients (each service uses
+a separate client). This is a shortcut for `client_options={"api_key": api_key}`.
+If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be
+used.
+
+ |
+
+
+
+`default_metadata`
+
+ |
+
+
+Default (key, value) metadata pairs to send with every request.
+When using `transport="rest"`, these are sent as HTTP headers.
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/create_tuned_model.md b/docs/api/google/generativeai/create_tuned_model.md
new file mode 100644
index 000000000..04ce93d75
--- /dev/null
+++ b/docs/api/google/generativeai/create_tuned_model.md
@@ -0,0 +1,246 @@
+
+# google.generativeai.create_tuned_model
+
+
+
+
+
+
+
+Calls the API to initiate a tuning process that optimizes a model for specific data, returning an operation object to track and manage the tuning progress.
+
+
+
+google.generativeai.create_tuned_model(
+ source_model: model_types.AnyModelNameOptions,
+ training_data: model_types.TuningDataOptions,
+ *,
+ id: (str | None) = None,
+ display_name: (str | None) = None,
+ description: (str | None) = None,
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None,
+ epoch_count: (int | None) = None,
+ batch_size: (int | None) = None,
+ learning_rate: (float | None) = None,
+ input_key: str = 'text_input',
+ output_key: str = 'output',
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> operations.CreateTunedModelOperation
+
+
+
+
+
+
+Since tuning a model can take significant time, this API doesn't wait for the tuning to complete.
+Instead, it returns a `google.api_core.operation.Operation` object that lets you check on the
+status of the tuning job, or wait for it to complete, and check the result.
+
+After the job completes, you can find the resulting `TunedModel` object in
+`Operation.result()`, or look it up with `genai.list_tuned_models` or `genai.get_tuned_model(model_id)`.
+
+```
+my_id = "my-tuned-model-id"
+operation = palm.create_tuned_model(
+ id = my_id,
+ source_model="models/text-bison-001",
+ training_data=[{'text_input': 'example input', 'output': 'example output'},...]
+)
+tuned_model=operation.result() # Wait for tuning to finish
+
+palm.generate_text(f"tunedModels/{my_id}", prompt="...")
+```
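+
+Since tuning can take a while, a progress-polling sketch (assuming the
+`wait_bar()` helper on the returned operation; names mirror the example above):
+
+```
+import time
+
+# `operation` comes from the create_tuned_model call above.
+for status in operation.wait_bar():
+    time.sleep(10)  # poll until the tuning job finishes
+
+tuned_model = operation.result()
+```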
+
+
+
+
+Args |
+
+
+
+
+`source_model`
+
+ |
+
+
+The name of the model to tune.
+
+ |
+
+
+
+`training_data`
+
+ |
+
+
+The dataset to tune the model on. This must be either:
+* A `protos.Dataset`, or
+* An `Iterable` of:
+  * `protos.TuningExample`,
+  * `{'text_input': text_input, 'output': output}` dicts, or
+  * `(text_input, output)` tuples.
+* A `Mapping` of `Iterable[str]` - use `input_key` and `output_key` to choose which
+  columns to use as the input/output.
+* A CSV file (read with `pd.read_csv` and handled as a `Mapping`,
+  as above). This can be:
+  * A local path as a `str` or `pathlib.Path`.
+  * A URL for a CSV file.
+  * The URL of a Google Sheets file.
+* A JSON file - its contents will be handled either as an `Iterable` or a `Mapping`,
+  as above. This can be:
+  * A local path as a `str` or `pathlib.Path`.
+
+ |
+
+
+
+`id`
+
+ |
+
+
+The model identifier, used to refer to the model in the API
+`tunedModels/{id}`. Must be unique.
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+A human-readable name for display.
+
+ |
+
+
+
+`description`
+
+ |
+
+
+A description of the tuned model.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+The default temperature for the tuned model, see types.Model for details.
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+The default `top_p` for the model, see types.Model for details.
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+The default `top_k` for the model, see types.Model for details.
+
+ |
+
+
+
+`epoch_count`
+
+ |
+
+
+The number of tuning epochs to run. An epoch is a pass over the whole dataset.
+
+ |
+
+
+
+`batch_size`
+
+ |
+
+
+The number of examples to use in each training batch.
+
+ |
+
+
+
+`learning_rate`
+
+ |
+
+
+The step size multiplier for the gradient updates.
+
+ |
+
+
+
+`client`
+
+ |
+
+
+Which client to use.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+A [`google.api_core.operation.Operation`](https://googleapis.dev/python/google-api-core/latest/operation.html)
+
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/delete_file.md b/docs/api/google/generativeai/delete_file.md
new file mode 100644
index 000000000..5a3bfc295
--- /dev/null
+++ b/docs/api/google/generativeai/delete_file.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.delete_file
+
+
+
+
+
+
+
+Calls the API to permanently delete a specified file using a supported file service.
+
+
+
+google.generativeai.delete_file(
+ name: (str | file_types.File | protos.File)
+)
+
+
+
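+A one-line sketch (the file name is a hypothetical placeholder):
+
+```
+import google.generativeai as genai
+
+genai.delete_file("files/abc-123")  # hypothetical file name
+```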
+
+
diff --git a/docs/api/google/generativeai/delete_tuned_model.md b/docs/api/google/generativeai/delete_tuned_model.md
new file mode 100644
index 000000000..5b1396cea
--- /dev/null
+++ b/docs/api/google/generativeai/delete_tuned_model.md
@@ -0,0 +1,30 @@
+
+# google.generativeai.delete_tuned_model
+
+
+
+
+
+
+
+Calls the API to delete a specified tuned model.
+
+
+
+google.generativeai.delete_tuned_model(
+ tuned_model: model_types.TunedModelNameOptions,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> None
+
+
+
+
+
diff --git a/docs/api/google/generativeai/embed_content.md b/docs/api/google/generativeai/embed_content.md
new file mode 100644
index 000000000..af0e3c99b
--- /dev/null
+++ b/docs/api/google/generativeai/embed_content.md
@@ -0,0 +1,132 @@
+
+# google.generativeai.embed_content
+
+
+
+
+
+
+
+Calls the API to create embeddings for content passed in.
+
+
+
+google.generativeai.embed_content(
+ model: model_types.BaseModelNameOptions,
+ content: (content_types.ContentType | Iterable[content_types.ContentType]),
+ task_type: (EmbeddingTaskTypeOptions | None) = None,
+ title: (str | None) = None,
+ output_dimensionality: (int | None) = None,
+ client: glm.GenerativeServiceClient = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> (text_types.EmbeddingDict | text_types.BatchEmbeddingDict)
+
+
+
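+A usage sketch; the model and text are illustrative, and `task_type`/`title`
+follow the constraints described below:
+
+```
+import google.generativeai as genai
+
+result = genai.embed_content(
+    model="models/embedding-001",
+    content="What is the meaning of life?",
+    task_type="retrieval_document",
+    title="Embedding of a single string",
+)
+print(len(result["embedding"]))  # the embedding is a list of floats
+```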
+
+
+
+
+
+
+
+Args |
+
+
+
+
+`model`
+
+ |
+
+
+ Which [model](https://ai.google.dev/models/gemini#embedding) to
+call, as a string or a `types.Model`.
+
+ |
+
+
+
+`content`
+
+ |
+
+
+ Content to embed.
+
+ |
+
+
+
+`task_type`
+
+ |
+
+
+ Optional task type for which the embeddings will be used. Can only
+be set for `models/embedding-001`.
+
+ |
+
+
+
+`title`
+
+ |
+
+
+ An optional title for the text. Only applicable when task_type is
+`RETRIEVAL_DOCUMENT`.
+
+ |
+
+
+
+`output_dimensionality`
+
+ |
+
+
+ Optional reduced dimensionality for the output embeddings. If set,
+excessive values from the output embeddings will be truncated from
+the end.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+ Options for the request.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+Dictionary containing the embedding (list of float values) for the
+input content.
+
+ |
+
+
+
+
diff --git a/docs/api/google/generativeai/embed_content_async.md b/docs/api/google/generativeai/embed_content_async.md
new file mode 100644
index 000000000..8c8ebc9c2
--- /dev/null
+++ b/docs/api/google/generativeai/embed_content_async.md
@@ -0,0 +1,34 @@
+
+# google.generativeai.embed_content_async
+
+
+
+
+
+
+
+Calls the API to create async embeddings for content passed in.
+
+
+
+google.generativeai.embed_content_async(
+ model,
+ content,
+ task_type=None,
+ title=None,
+ output_dimensionality=None,
+ client=None,
+ request_options=None
+)
+
+
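+A sketch of the async variant, which mirrors `embed_content` but must be
+awaited inside an event loop:
+
+```
+import asyncio
+
+import google.generativeai as genai
+
+async def main():
+    result = await genai.embed_content_async(
+        model="models/embedding-001",
+        content="Hello world",
+    )
+    print(len(result["embedding"]))
+
+asyncio.run(main())
+```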
+
+
+
diff --git a/docs/api/google/generativeai/get_base_model.md b/docs/api/google/generativeai/get_base_model.md
new file mode 100644
index 000000000..760acc975
--- /dev/null
+++ b/docs/api/google/generativeai/get_base_model.md
@@ -0,0 +1,95 @@
+
+# google.generativeai.get_base_model
+
+
+
+
+
+
+
+Calls the API to fetch a base model by name.
+
+
+
+google.generativeai.get_base_model(
+ name: model_types.BaseModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.Model
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_base_model('models/chat-bison-001')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the model to fetch. Should start with `models/`.
+
+ |
+
+
+
+`client`
+
+ |
+
+
+The client to use.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_file.md b/docs/api/google/generativeai/get_file.md
new file mode 100644
index 000000000..18000cd7c
--- /dev/null
+++ b/docs/api/google/generativeai/get_file.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.get_file
+
+
+
+
+
+
+
+Calls the API to retrieve a specified file using a supported file service.
+
+
+
+google.generativeai.get_file(
+ name: str
+) -> file_types.File
+
+
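+A short sketch (the file name is a hypothetical placeholder):
+
+```
+import google.generativeai as genai
+
+f = genai.get_file("files/abc-123")  # hypothetical file name
+print(f.name, f.uri)
+```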
+
+
+
diff --git a/docs/api/google/generativeai/get_model.md b/docs/api/google/generativeai/get_model.md
new file mode 100644
index 000000000..cba445afc
--- /dev/null
+++ b/docs/api/google/generativeai/get_model.md
@@ -0,0 +1,95 @@
+
+# google.generativeai.get_model
+
+
+
+
+
+
+
+Calls the API to fetch a model by name.
+
+
+
+google.generativeai.get_model(
+ name: model_types.AnyModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> (model_types.Model | model_types.TunedModel)
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_model('models/gemini-1.5-flash')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the model to fetch. Should start with `models/`.
+
+ |
+
+
+
+`client`
+
+ |
+
+
+The client to use.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_operation.md b/docs/api/google/generativeai/get_operation.md
new file mode 100644
index 000000000..c4041621c
--- /dev/null
+++ b/docs/api/google/generativeai/get_operation.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.get_operation
+
+
+
+
+
+
+
+Calls the API to get a specific operation.
+
+
+
+google.generativeai.get_operation(
+ name: str, *, client=None
+) -> CreateTunedModelOperation
+
+
+
+
+
diff --git a/docs/api/google/generativeai/get_tuned_model.md b/docs/api/google/generativeai/get_tuned_model.md
new file mode 100644
index 000000000..72b62dac8
--- /dev/null
+++ b/docs/api/google/generativeai/get_tuned_model.md
@@ -0,0 +1,95 @@
+
+# google.generativeai.get_tuned_model
+
+
+
+
+
+
+
+Calls the API to fetch a tuned model by name.
+
+
+
+google.generativeai.get_tuned_model(
+ name: model_types.TunedModelNameOptions,
+ *,
+ client=None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModel
+
+
+
+
+
+
+```
+import pprint
+model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the model to fetch. Should start with `tunedModels/`.
+
+ |
+
+
+
+`client`
+
+ |
+
+
+The client to use.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_files.md b/docs/api/google/generativeai/list_files.md
new file mode 100644
index 000000000..ceafc9677
--- /dev/null
+++ b/docs/api/google/generativeai/list_files.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.list_files
+
+
+
+
+
+
+
+Calls the API to list files using a supported file service.
+
+
+
+google.generativeai.list_files(
+ page_size=100
+) -> Iterable[file_types.File]
+
+
+
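+A sketch of iterating over the result:
+
+```
+import google.generativeai as genai
+
+for f in genai.list_files(page_size=100):
+    print(f.name, f.display_name)
+```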
+
+
diff --git a/docs/api/google/generativeai/list_models.md b/docs/api/google/generativeai/list_models.md
new file mode 100644
index 000000000..befb027db
--- /dev/null
+++ b/docs/api/google/generativeai/list_models.md
@@ -0,0 +1,95 @@
+
+# google.generativeai.list_models
+
+
+
+
+
+
+
+Calls the API to list all available models.
+
+
+
+google.generativeai.list_models(
+ *,
+ page_size: (int | None) = 50,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.ModelsIterable
+
+
+
+
+
+
+```
+import pprint
+for model in genai.list_models():
+ pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+
+`page_size`
+
+ |
+
+
+How many `types.Models` to fetch per page (API call).
+
+ |
+
+
+
+`client`
+
+ |
+
+
+You may pass a `glm.ModelServiceClient` instead of using the default client.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/list_operations.md b/docs/api/google/generativeai/list_operations.md
new file mode 100644
index 000000000..d28a21022
--- /dev/null
+++ b/docs/api/google/generativeai/list_operations.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.list_operations
+
+
+
+
+
+
+
+Calls the API to list all operations.
+
+
+
+google.generativeai.list_operations(
+ *, client=None
+) -> Iterator[CreateTunedModelOperation]
+
+
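+A sketch, assuming each yielded operation exposes the standard
+`google.api_core.operation.Operation` surface:
+
+```
+import google.generativeai as genai
+
+for op in genai.list_operations():
+    # For CreateTunedModel operations, `metadata` carries tuning progress.
+    print(op.metadata)
+```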
+
+
+
diff --git a/docs/api/google/generativeai/list_tuned_models.md b/docs/api/google/generativeai/list_tuned_models.md
new file mode 100644
index 000000000..71558375b
--- /dev/null
+++ b/docs/api/google/generativeai/list_tuned_models.md
@@ -0,0 +1,95 @@
+
+# google.generativeai.list_tuned_models
+
+
+
+
+
+
+
+Calls the API to list all tuned models.
+
+
+
+google.generativeai.list_tuned_models(
+ *,
+ page_size: (int | None) = 50,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModelsIterable
+
+
+
+
+
+
+```
+import pprint
+for model in genai.list_tuned_models():
+ pprint.pprint(model)
+```
+
+
+
+
+Args |
+
+
+
+
+`page_size`
+
+ |
+
+
+How many `types.Models` to fetch per page (API call).
+
+ |
+
+
+
+`client`
+
+ |
+
+
+You may pass a `glm.ModelServiceClient` instead of using the default client.
+
+ |
+
+
+
+`request_options`
+
+ |
+
+
+Options for the request.
+
+ |
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos.md b/docs/api/google/generativeai/protos.md
new file mode 100644
index 000000000..a01b25324
--- /dev/null
+++ b/docs/api/google/generativeai/protos.md
@@ -0,0 +1,384 @@
+
+# Module: google.generativeai.protos
+
+
+
+
+
+
+
+This module provides low-level access to the protocol buffer ("Message") classes used by the API.
+
+
+**For typical usage of this SDK you do not need to use any of these classes.**
+
+Protocol buffers are the serialization format used by Google APIs. They are strongly typed and efficient.
+
+The `genai` SDK tries to be permissive about what objects it will accept from a user, but in the end
+the SDK always converts input to an appropriate Proto Message object to send as the request. Each API request
+has a `*Request` and `*Response` Message defined here.
+
+If you have any uncertainty about what the API may accept or return, these classes provide the
+complete/unambiguous answer. They come from the `google-ai-generativelanguage` package which is
+generated from a snapshot of the API definition.
+
+```
+>>> from google.generativeai import protos
+>>> import inspect
+>>> print(inspect.getsource(protos.Part))
+```
+
+Proto classes can have "oneof" fields. Use `in` to check which `oneof` field is set.
+
+```
+>>> p = protos.Part(text='hello')
+>>> 'text' in p
+True
+>>> p.inline_data = {'mime_type':'image/png', 'data': b'PNG'}
+>>> type(p.inline_data) is protos.Blob
+True
+>>> 'inline_data' in p
+True
+>>> 'text' in p
+False
+```
+
+Instances of all Message classes can be converted into JSON-compatible dictionaries with the following construct
+(bytes are base64 encoded):
+
+```
+>>> p_dict = type(p).to_dict(p)
+>>> p_dict
+{'inline_data': {'mime_type': 'image/png', 'data': 'UE5H'}}
+```
+
+A compatible dict can be converted to an instance of a Message class by passing it as the first argument to the
+constructor:
+
+```
+>>> p = protos.Part(p_dict)
+inline_data {
+ mime_type: "image/png"
+ data: "PNG"
+}
+```
+
+Note that `to_dict` accepts additional arguments:
+
+- `use_integers_for_enums: bool = True`. Set it to `False` to replace enum int values with their string
+  names in the output.
+- `including_default_value_fields: bool = True`. Set it to `False` to reduce the verbosity of the output.
+
+Additional arguments are described in the docstring:
+
+```
+>>> help(proto.Part.to_dict)
+```
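+
+For instance, with `p` from above:
+
+```
+>>> type(p).to_dict(p, including_default_value_fields=False)
+```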
+
+## Classes
+
+[`class AttributionSourceId`](../../google/generativeai/protos/AttributionSourceId.md): Identifier for the source contributing to this attribution.
+
+[`class BatchCreateChunksRequest`](../../google/generativeai/protos/BatchCreateChunksRequest.md): Request to batch create ``Chunk``\ s.
+
+[`class BatchCreateChunksResponse`](../../google/generativeai/protos/BatchCreateChunksResponse.md): Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
+
+[`class BatchDeleteChunksRequest`](../../google/generativeai/protos/BatchDeleteChunksRequest.md): Request to batch delete ``Chunk``\ s.
+
+[`class BatchEmbedContentsRequest`](../../google/generativeai/protos/BatchEmbedContentsRequest.md): Batch request to get embeddings from the model for a list of prompts.
+
+[`class BatchEmbedContentsResponse`](../../google/generativeai/protos/BatchEmbedContentsResponse.md): The response to a ``BatchEmbedContentsRequest``.
+
+[`class BatchEmbedTextRequest`](../../google/generativeai/protos/BatchEmbedTextRequest.md): Batch request to get a text embedding from the model.
+
+[`class BatchEmbedTextResponse`](../../google/generativeai/protos/BatchEmbedTextResponse.md): The response to a EmbedTextRequest.
+
+[`class BatchUpdateChunksRequest`](../../google/generativeai/protos/BatchUpdateChunksRequest.md): Request to batch update ``Chunk``\ s.
+
+[`class BatchUpdateChunksResponse`](../../google/generativeai/protos/BatchUpdateChunksResponse.md): Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
+
+[`class Blob`](../../google/generativeai/protos/Blob.md): Raw media bytes.
+
+[`class CachedContent`](../../google/generativeai/protos/CachedContent.md): Content that has been preprocessed and can be used in subsequent requests to GenerativeService.
+
+[`class Candidate`](../../google/generativeai/protos/Candidate.md): A response candidate generated from the model.
+
+[`class Chunk`](../../google/generativeai/protos/Chunk.md): A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
+
+[`class ChunkData`](../../google/generativeai/protos/ChunkData.md): Extracted data that represents the ``Chunk`` content.
+
+[`class CitationMetadata`](../../google/generativeai/protos/CitationMetadata.md): A collection of source attributions for a piece of content.
+
+[`class CitationSource`](../../google/generativeai/protos/CitationSource.md): A citation to a source for a portion of a specific response.
+
+[`class CodeExecution`](../../google/generativeai/protos/CodeExecution.md): Tool that executes code generated by the model, and automatically returns the result to the model.
+
+[`class CodeExecutionResult`](../../google/generativeai/protos/CodeExecutionResult.md): Result of executing the ``ExecutableCode``.
+
+[`class Condition`](../../google/generativeai/protos/Condition.md): Filter condition applicable to a single key.
+
+[`class Content`](../../google/generativeai/protos/Content.md): The base structured datatype containing multi-part content of a message.
+
+[`class ContentEmbedding`](../../google/generativeai/protos/ContentEmbedding.md): A list of floats representing an embedding.
+
+[`class ContentFilter`](../../google/generativeai/protos/ContentFilter.md): Content filtering metadata associated with processing a single request.
+
+[`class Corpus`](../../google/generativeai/protos/Corpus.md): A ``Corpus`` is a collection of ``Document``\ s.
+
+[`class CountMessageTokensRequest`](../../google/generativeai/protos/CountMessageTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountMessageTokensResponse`](../../google/generativeai/protos/CountMessageTokensResponse.md): A response from ``CountMessageTokens``.
+
+[`class CountTextTokensRequest`](../../google/generativeai/protos/CountTextTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountTextTokensResponse`](../../google/generativeai/protos/CountTextTokensResponse.md): A response from ``CountTextTokens``.
+
+[`class CountTokensRequest`](../../google/generativeai/protos/CountTokensRequest.md): Counts the number of tokens in the ``prompt`` sent to a model.
+
+[`class CountTokensResponse`](../../google/generativeai/protos/CountTokensResponse.md): A response from ``CountTokens``.
+
+[`class CreateCachedContentRequest`](../../google/generativeai/protos/CreateCachedContentRequest.md): Request to create CachedContent.
+
+[`class CreateChunkRequest`](../../google/generativeai/protos/CreateChunkRequest.md): Request to create a ``Chunk``.
+
+[`class CreateCorpusRequest`](../../google/generativeai/protos/CreateCorpusRequest.md): Request to create a ``Corpus``.
+
+[`class CreateDocumentRequest`](../../google/generativeai/protos/CreateDocumentRequest.md): Request to create a ``Document``.
+
+[`class CreateFileRequest`](../../google/generativeai/protos/CreateFileRequest.md): Request for ``CreateFile``.
+
+[`class CreateFileResponse`](../../google/generativeai/protos/CreateFileResponse.md): Response for ``CreateFile``.
+
+[`class CreatePermissionRequest`](../../google/generativeai/protos/CreatePermissionRequest.md): Request to create a ``Permission``.
+
+[`class CreateTunedModelMetadata`](../../google/generativeai/protos/CreateTunedModelMetadata.md): Metadata about the state and progress of creating a tuned model returned from the long-running operation
+
+[`class CreateTunedModelRequest`](../../google/generativeai/protos/CreateTunedModelRequest.md): Request to create a TunedModel.
+
+[`class CustomMetadata`](../../google/generativeai/protos/CustomMetadata.md): User provided metadata stored as key-value pairs.
+
+[`class Dataset`](../../google/generativeai/protos/Dataset.md): Dataset for training or validation.
+
+[`class DeleteCachedContentRequest`](../../google/generativeai/protos/DeleteCachedContentRequest.md): Request to delete CachedContent.
+
+[`class DeleteChunkRequest`](../../google/generativeai/protos/DeleteChunkRequest.md): Request to delete a ``Chunk``.
+
+[`class DeleteCorpusRequest`](../../google/generativeai/protos/DeleteCorpusRequest.md): Request to delete a ``Corpus``.
+
+[`class DeleteDocumentRequest`](../../google/generativeai/protos/DeleteDocumentRequest.md): Request to delete a ``Document``.
+
+[`class DeleteFileRequest`](../../google/generativeai/protos/DeleteFileRequest.md): Request for ``DeleteFile``.
+
+[`class DeletePermissionRequest`](../../google/generativeai/protos/DeletePermissionRequest.md): Request to delete the ``Permission``.
+
+[`class DeleteTunedModelRequest`](../../google/generativeai/protos/DeleteTunedModelRequest.md): Request to delete a TunedModel.
+
+[`class Document`](../../google/generativeai/protos/Document.md): A ``Document`` is a collection of ``Chunk``\ s.
+
+[`class DynamicRetrievalConfig`](../../google/generativeai/protos/DynamicRetrievalConfig.md): Describes the options to customize dynamic retrieval.
+
+[`class EmbedContentRequest`](../../google/generativeai/protos/EmbedContentRequest.md): Request containing the ``Content`` for the model to embed.
+
+[`class EmbedContentResponse`](../../google/generativeai/protos/EmbedContentResponse.md): The response to an ``EmbedContentRequest``.
+
+[`class EmbedTextRequest`](../../google/generativeai/protos/EmbedTextRequest.md): Request to get a text embedding from the model.
+
+[`class EmbedTextResponse`](../../google/generativeai/protos/EmbedTextResponse.md): The response to a EmbedTextRequest.
+
+[`class Embedding`](../../google/generativeai/protos/Embedding.md): A list of floats representing the embedding.
+
+[`class Example`](../../google/generativeai/protos/Example.md): An input/output example used to instruct the Model.
+
+[`class ExecutableCode`](../../google/generativeai/protos/ExecutableCode.md): Code generated by the model that is meant to be executed, and the result returned to the model.
+
+[`class File`](../../google/generativeai/protos/File.md): A file uploaded to the API.
+
+[`class FileData`](../../google/generativeai/protos/FileData.md): URI based data.
+
+[`class FunctionCall`](../../google/generativeai/protos/FunctionCall.md): A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values.
+
+[`class FunctionCallingConfig`](../../google/generativeai/protos/FunctionCallingConfig.md): Configuration for specifying function calling behavior.
+
+[`class FunctionDeclaration`](../../google/generativeai/protos/FunctionDeclaration.md): Structured representation of a function declaration as defined by the OpenAPI 3.03 specification.
+
+[`class FunctionResponse`](../../google/generativeai/protos/FunctionResponse.md): The result output from a ``FunctionCall``; it contains a string representing the FunctionDeclaration.name and a structured JSON object with the function's output, and is used as context for the model.
+
+[`class GenerateAnswerRequest`](../../google/generativeai/protos/GenerateAnswerRequest.md): Request to generate a grounded answer from the ``Model``.
+
+[`class GenerateAnswerResponse`](../../google/generativeai/protos/GenerateAnswerResponse.md): Response from the model for a grounded answer.
+
+[`class GenerateContentRequest`](../../google/generativeai/protos/GenerateContentRequest.md): Request to generate a completion from the model.
+
+[`class GenerateContentResponse`](../../google/generativeai/protos/GenerateContentResponse.md): Response from the model supporting multiple candidate responses.
+
+[`class GenerateMessageRequest`](../../google/generativeai/protos/GenerateMessageRequest.md): Request to generate a message response from the model.
+
+[`class GenerateMessageResponse`](../../google/generativeai/protos/GenerateMessageResponse.md): The response from the model.
+
+[`class GenerateTextRequest`](../../google/generativeai/protos/GenerateTextRequest.md): Request to generate a text completion response from the model.
+
+[`class GenerateTextResponse`](../../google/generativeai/protos/GenerateTextResponse.md): The response from the model, including candidate completions.
+
+[`class GenerationConfig`](../../google/generativeai/protos/GenerationConfig.md): Configuration options for model generation and outputs.
+
+[`class GetCachedContentRequest`](../../google/generativeai/protos/GetCachedContentRequest.md): Request to read CachedContent.
+
+[`class GetChunkRequest`](../../google/generativeai/protos/GetChunkRequest.md): Request for getting information about a specific ``Chunk``.
+
+[`class GetCorpusRequest`](../../google/generativeai/protos/GetCorpusRequest.md): Request for getting information about a specific ``Corpus``.
+
+[`class GetDocumentRequest`](../../google/generativeai/protos/GetDocumentRequest.md): Request for getting information about a specific ``Document``.
+
+[`class GetFileRequest`](../../google/generativeai/protos/GetFileRequest.md): Request for ``GetFile``.
+
+[`class GetModelRequest`](../../google/generativeai/protos/GetModelRequest.md): Request for getting information about a specific Model.
+
+[`class GetPermissionRequest`](../../google/generativeai/protos/GetPermissionRequest.md): Request for getting information about a specific ``Permission``.
+
+[`class GetTunedModelRequest`](../../google/generativeai/protos/GetTunedModelRequest.md): Request for getting information about a specific Model.
+
+[`class GoogleSearchRetrieval`](../../google/generativeai/protos/GoogleSearchRetrieval.md): Tool to retrieve public web data for grounding, powered by Google.
+
+[`class GroundingAttribution`](../../google/generativeai/protos/GroundingAttribution.md): Attribution for a source that contributed to an answer.
+
+[`class GroundingChunk`](../../google/generativeai/protos/GroundingChunk.md): Grounding chunk.
+
+[`class GroundingMetadata`](../../google/generativeai/protos/GroundingMetadata.md): Metadata returned to client when grounding is enabled.
+
+[`class GroundingPassage`](../../google/generativeai/protos/GroundingPassage.md): Passage included inline with a grounding configuration.
+
+[`class GroundingPassages`](../../google/generativeai/protos/GroundingPassages.md): A repeated list of passages.
+
+[`class GroundingSupport`](../../google/generativeai/protos/GroundingSupport.md): Grounding support.
+
+[`class HarmCategory`](../../google/generativeai/protos/HarmCategory.md): The category of a rating.
+
+[`class Hyperparameters`](../../google/generativeai/protos/Hyperparameters.md): Hyperparameters controlling the tuning process.
+
+[`class ListCachedContentsRequest`](../../google/generativeai/protos/ListCachedContentsRequest.md): Request to list CachedContents.
+
+[`class ListCachedContentsResponse`](../../google/generativeai/protos/ListCachedContentsResponse.md): Response with CachedContents list.
+
+[`class ListChunksRequest`](../../google/generativeai/protos/ListChunksRequest.md): Request for listing ``Chunk``\ s.
+
+[`class ListChunksResponse`](../../google/generativeai/protos/ListChunksResponse.md): Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
+
+[`class ListCorporaRequest`](../../google/generativeai/protos/ListCorporaRequest.md): Request for listing ``Corpora``.
+
+[`class ListCorporaResponse`](../../google/generativeai/protos/ListCorporaResponse.md): Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
+
+[`class ListDocumentsRequest`](../../google/generativeai/protos/ListDocumentsRequest.md): Request for listing ``Document``\ s.
+
+[`class ListDocumentsResponse`](../../google/generativeai/protos/ListDocumentsResponse.md): Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
+
+[`class ListFilesRequest`](../../google/generativeai/protos/ListFilesRequest.md): Request for ``ListFiles``.
+
+[`class ListFilesResponse`](../../google/generativeai/protos/ListFilesResponse.md): Response for ``ListFiles``.
+
+[`class ListModelsRequest`](../../google/generativeai/protos/ListModelsRequest.md): Request for listing all Models.
+
+[`class ListModelsResponse`](../../google/generativeai/protos/ListModelsResponse.md): Response from ``ListModel`` containing a paginated list of Models.
+
+[`class ListPermissionsRequest`](../../google/generativeai/protos/ListPermissionsRequest.md): Request for listing permissions.
+
+[`class ListPermissionsResponse`](../../google/generativeai/protos/ListPermissionsResponse.md): Response from ``ListPermissions`` containing a paginated list of permissions.
+
+[`class ListTunedModelsRequest`](../../google/generativeai/protos/ListTunedModelsRequest.md): Request for listing TunedModels.
+
+[`class ListTunedModelsResponse`](../../google/generativeai/protos/ListTunedModelsResponse.md): Response from ``ListTunedModels`` containing a paginated list of Models.
+
+[`class LogprobsResult`](../../google/generativeai/protos/LogprobsResult.md): Logprobs Result
+
+[`class Message`](../../google/generativeai/protos/Message.md): The base unit of structured text.
+
+[`class MessagePrompt`](../../google/generativeai/protos/MessagePrompt.md): All of the structured input text passed to the model as a prompt.
+
+[`class MetadataFilter`](../../google/generativeai/protos/MetadataFilter.md): User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
+
+[`class Model`](../../google/generativeai/protos/Model.md): Information about a Generative Language Model.
+
+[`class Part`](../../google/generativeai/protos/Part.md): A datatype containing media that is part of a multi-part ``Content`` message.
+
+[`class Permission`](../../google/generativeai/protos/Permission.md): Permission resource grants user, group or the rest of the world access to the PaLM API resource (e.g.
+
+[`class PredictRequest`](../../google/generativeai/protos/PredictRequest.md): Request message for [PredictionService.Predict][google.ai.generativelanguage.v1beta.PredictionService.Predict].
+
+[`class PredictResponse`](../../google/generativeai/protos/PredictResponse.md): Response message for [PredictionService.Predict].
+
+[`class QueryCorpusRequest`](../../google/generativeai/protos/QueryCorpusRequest.md): Request for querying a ``Corpus``.
+
+[`class QueryCorpusResponse`](../../google/generativeai/protos/QueryCorpusResponse.md): Response from ``QueryCorpus`` containing a list of relevant chunks.
+
+[`class QueryDocumentRequest`](../../google/generativeai/protos/QueryDocumentRequest.md): Request for querying a ``Document``.
+
+[`class QueryDocumentResponse`](../../google/generativeai/protos/QueryDocumentResponse.md): Response from ``QueryDocument`` containing a list of relevant chunks.
+
+[`class RelevantChunk`](../../google/generativeai/protos/RelevantChunk.md): The information for a chunk relevant to a query.
+
+[`class RetrievalMetadata`](../../google/generativeai/protos/RetrievalMetadata.md): Metadata related to retrieval in the grounding flow.
+
+[`class SafetyFeedback`](../../google/generativeai/protos/SafetyFeedback.md): Safety feedback for an entire request.
+
+[`class SafetyRating`](../../google/generativeai/protos/SafetyRating.md): Safety rating for a piece of content.
+
+[`class SafetySetting`](../../google/generativeai/protos/SafetySetting.md): Safety setting, affecting the safety-blocking behavior.
+
+[`class Schema`](../../google/generativeai/protos/Schema.md): The ``Schema`` object allows the definition of input and output data types.
+
+[`class SearchEntryPoint`](../../google/generativeai/protos/SearchEntryPoint.md): Google search entry point.
+
+[`class Segment`](../../google/generativeai/protos/Segment.md): Segment of the content.
+
+[`class SemanticRetrieverConfig`](../../google/generativeai/protos/SemanticRetrieverConfig.md): Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
+
+[`class StringList`](../../google/generativeai/protos/StringList.md): User provided string values assigned to a single metadata key.
+
+[`class TaskType`](../../google/generativeai/protos/TaskType.md): Type of task for which the embedding will be used.
+
+[`class TextCompletion`](../../google/generativeai/protos/TextCompletion.md): Output text returned from a model.
+
+[`class TextPrompt`](../../google/generativeai/protos/TextPrompt.md): Text given to the model as a prompt.
+
+[`class Tool`](../../google/generativeai/protos/Tool.md): Tool details that the model may use to generate a response.
+
+[`class ToolConfig`](../../google/generativeai/protos/ToolConfig.md): The Tool configuration containing parameters for specifying ``Tool`` use in the request.
+
+[`class TransferOwnershipRequest`](../../google/generativeai/protos/TransferOwnershipRequest.md): Request to transfer the ownership of the tuned model.
+
+[`class TransferOwnershipResponse`](../../google/generativeai/protos/TransferOwnershipResponse.md): Response from ``TransferOwnership``.
+
+[`class TunedModel`](../../google/generativeai/protos/TunedModel.md): A fine-tuned model created using ModelService.CreateTunedModel.
+
+[`class TunedModelSource`](../../google/generativeai/protos/TunedModelSource.md): Tuned model as a source for training a new model.
+
+[`class TuningExample`](../../google/generativeai/protos/TuningExample.md): A single example for tuning.
+
+[`class TuningExamples`](../../google/generativeai/protos/TuningExamples.md): A set of tuning examples. Can be training or validation data.
+
+[`class TuningSnapshot`](../../google/generativeai/protos/TuningSnapshot.md): Record for a single tuning step.
+
+[`class TuningTask`](../../google/generativeai/protos/TuningTask.md): Tuning tasks that create tuned models.
+
+[`class Type`](../../google/generativeai/protos/Type.md): Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
+
+[`class UpdateCachedContentRequest`](../../google/generativeai/protos/UpdateCachedContentRequest.md): Request to update CachedContent.
+
+[`class UpdateChunkRequest`](../../google/generativeai/protos/UpdateChunkRequest.md): Request to update a ``Chunk``.
+
+[`class UpdateCorpusRequest`](../../google/generativeai/protos/UpdateCorpusRequest.md): Request to update a ``Corpus``.
+
+[`class UpdateDocumentRequest`](../../google/generativeai/protos/UpdateDocumentRequest.md): Request to update a ``Document``.
+
+[`class UpdatePermissionRequest`](../../google/generativeai/protos/UpdatePermissionRequest.md): Request to update the ``Permission``.
+
+[`class UpdateTunedModelRequest`](../../google/generativeai/protos/UpdateTunedModelRequest.md): Request to update a TunedModel.
+
+[`class VideoMetadata`](../../google/generativeai/protos/VideoMetadata.md): Metadata for a video ``File``.
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId.md b/docs/api/google/generativeai/protos/AttributionSourceId.md
new file mode 100644
index 000000000..83d292303
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId.md
@@ -0,0 +1,73 @@
+
+# google.generativeai.protos.AttributionSourceId
+
+
+
+
+
+
+
+Identifier for the source contributing to this attribution.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`grounding_passage`
+
+ |
+
+
+`google.ai.generativelanguage.AttributionSourceId.GroundingPassageId`
+
+Identifier for an inline passage.
+
+This field is a member of `oneof`_ ``source``.
+
+ |
+
+
+
+`semantic_retriever_chunk`
+
+ |
+
+
+`google.ai.generativelanguage.AttributionSourceId.SemanticRetrieverChunk`
+
+Identifier for a ``Chunk`` fetched via Semantic Retriever.
+
+This field is a member of `oneof`_ ``source``.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class GroundingPassageId`](../../../google/generativeai/protos/AttributionSourceId/GroundingPassageId.md)
+
+[`class SemanticRetrieverChunk`](../../../google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md)
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
new file mode 100644
index 000000000..99d5e65aa
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId/GroundingPassageId.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.AttributionSourceId.GroundingPassageId
+
+
+
+
+
+
+
+Identifier for a part within a ``GroundingPassage``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`passage_id`
+
+ |
+
+
+`str`
+
+Output only. ID of the passage matching the
+``GenerateAnswerRequest``'s GroundingPassage.id.
+
+ |
+
+
+
+`part_index`
+
+ |
+
+
+`int`
+
+Output only. Index of the part within the
+``GenerateAnswerRequest``'s GroundingPassage.content.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
new file mode 100644
index 000000000..699a07c0f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/AttributionSourceId/SemanticRetrieverChunk.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.AttributionSourceId.SemanticRetrieverChunk
+
+
+
+
+
+
+
+Identifier for a ``Chunk`` retrieved via Semantic Retriever specified in the ``GenerateAnswerRequest`` using ``SemanticRetrieverConfig``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`source`
+
+ |
+
+
+`str`
+
+Output only. Name of the source matching the request's
+SemanticRetrieverConfig.source. Example: ``corpora/123``
+or ``corpora/123/documents/abc``.
+
+ |
+
+
+
+`chunk`
+
+ |
+
+
+`str`
+
+Output only. Name of the ``Chunk`` containing the attributed
+text. Example: ``corpora/123/documents/abc/chunks/xyz``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
new file mode 100644
index 000000000..e734ed5b7
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchCreateChunksRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.BatchCreateChunksRequest
+
+
+
+
+
+
+
+Request to batch create ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Optional. The name of the ``Document`` where this batch of
+``Chunk``\ s will be created. The parent field in every
+``CreateChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`requests`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CreateChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to create. A maximum of 100 ``Chunk``\ s can be created in a
+batch.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
new file mode 100644
index 000000000..03eb60ada
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchCreateChunksResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.BatchCreateChunksResponse
+
+
+
+
+
+
+
+Response from ``BatchCreateChunks`` containing a list of created ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+``Chunk``\ s created.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
new file mode 100644
index 000000000..74b79a461
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchDeleteChunksRequest.md
@@ -0,0 +1,63 @@
+
+# google.generativeai.protos.BatchDeleteChunksRequest
+
+
+
+
+
+
+
+Request to batch delete ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Optional. The name of the ``Document`` containing the
+``Chunk``\ s to delete. The parent field in every
+``DeleteChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`requests`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.DeleteChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to delete.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
new file mode 100644
index 000000000..73b5ae36c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedContentsRequest.md
@@ -0,0 +1,67 @@
+
+# google.generativeai.protos.BatchEmbedContentsRequest
+
+
+
+
+
+
+
+Batch request to get embeddings from the model for a list of prompts.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+`requests`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.EmbedContentRequest]`
+
+Required. Embed requests for the batch. The model in each of
+these requests must match the model specified in
+BatchEmbedContentsRequest.model.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
new file mode 100644
index 000000000..27bb06245
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedContentsResponse.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.BatchEmbedContentsResponse
+
+
+
+
+
+
+
+The response to a ``BatchEmbedContentsRequest``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`embeddings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.ContentEmbedding]`
+
+Output only. The embeddings for each request,
+in the same order as provided in the batch
+request.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
new file mode 100644
index 000000000..d878bffc5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedTextRequest.md
@@ -0,0 +1,77 @@
+
+# google.generativeai.protos.BatchEmbedTextRequest
+
+
+
+
+
+
+
+Batch request to get a text embedding from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the embedding. Examples: models/embedding-gecko-001
+
+ |
+
+
+
+`texts`
+
+ |
+
+
+`MutableSequence[str]`
+
+Optional. The free-form input texts that the
+model will turn into an embedding. The current
+limit is 100 texts, over which an error will be
+thrown.
+
+ |
+
+
+
+`requests`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.EmbedTextRequest]`
+
+Optional. Embed requests for the batch. Only one of
+``texts`` or ``requests`` can be set.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
new file mode 100644
index 000000000..3a9359d1d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchEmbedTextResponse.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.BatchEmbedTextResponse
+
+
+
+
+
+
+
+The response to a EmbedTextRequest.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`embeddings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Embedding]`
+
+Output only. The embeddings generated from
+the input text.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
new file mode 100644
index 000000000..3a85775b0
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchUpdateChunksRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.BatchUpdateChunksRequest
+
+
+
+
+
+
+
+Request to batch update ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Optional. The name of the ``Document`` containing the
+``Chunk``\ s to update. The parent field in every
+``UpdateChunkRequest`` must match this value. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`requests`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.UpdateChunkRequest]`
+
+Required. The request messages specifying the ``Chunk``\ s
+to update. A maximum of 100 ``Chunk``\ s can be updated in a
+batch.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
new file mode 100644
index 000000000..bc797699c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/BatchUpdateChunksResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.BatchUpdateChunksResponse
+
+
+
+
+
+
+
+Response from ``BatchUpdateChunks`` containing a list of updated ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+``Chunk``\ s updated.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Blob.md b/docs/api/google/generativeai/protos/Blob.md
new file mode 100644
index 000000000..f2b36ca9c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Blob.md
@@ -0,0 +1,66 @@
+
+# google.generativeai.protos.Blob
+
+
+
+
+
+
+
+Raw media bytes.
+
+
+
+Text should not be sent as raw bytes; use the 'text' field.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`mime_type`
+
+ |
+
+
+`str`
+
+The IANA standard MIME type of the source data. Examples:
+
+- image/png
+- image/jpeg
+
+If an unsupported MIME type is provided, an error will be
+returned. For a complete list of supported types, see the
+supported file formats documentation.
+
+ |
+
+
+
+`data`
+
+ |
+
+
+`bytes`
+
+Raw bytes for media formats.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CachedContent.md b/docs/api/google/generativeai/protos/CachedContent.md
new file mode 100644
index 000000000..063c8fa43
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CachedContent.md
@@ -0,0 +1,222 @@
+
+# google.generativeai.protos.CachedContent
+
+
+
+
+
+
+
+Content that has been preprocessed and can be used in subsequent requests to GenerativeService.
+
+
+
+Cached content can only be used with the model it was created for.
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`expire_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Timestamp in UTC of when this resource is considered
+expired. This is *always* provided on output, regardless of
+what was sent on input.
+
+This field is a member of `oneof`_ ``expiration``.
+
+ |
+
+
+
+`ttl`
+
+ |
+
+
+`google.protobuf.duration_pb2.Duration`
+
+Input only. New TTL for this resource, input
+only.
+
+This field is a member of `oneof`_ ``expiration``.
+
+ |
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Optional. Identifier. The resource name referring to the
+cached content. Format: ``cachedContents/{id}``
+
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+Optional. Immutable. The user-generated
+meaningful display name of the cached content.
+Maximum 128 Unicode characters.
+
+
+ |
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. Immutable. The name of the ``Model`` to use for
+cached content. Format: ``models/{model}``
+
+
+ |
+
+
+
+`system_instruction`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Optional. Input only. Immutable. Developer-set
+system instruction. Currently text only.
+
+
+ |
+
+
+
+`contents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Optional. Input only. Immutable. The content
+to cache.
+
+ |
+
+
+
+`tools`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Tool]`
+
+Optional. Input only. Immutable. A list of ``Tools`` the
+model may use to generate the next response.
+
+ |
+
+
+
+`tool_config`
+
+ |
+
+
+`google.ai.generativelanguage.ToolConfig`
+
+Optional. Input only. Immutable. Tool config.
+This config is shared for all tools.
+
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. Creation time of the cache
+entry.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. When the cache entry was last
+updated in UTC time.
+
+ |
+
+
+
+`usage_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.CachedContent.UsageMetadata`
+
+Output only. Metadata on the usage of the
+cached content.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class UsageMetadata`](../../../google/generativeai/protos/CachedContent/UsageMetadata.md)
+
diff --git a/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
new file mode 100644
index 000000000..bfb519b55
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CachedContent/UsageMetadata.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.CachedContent.UsageMetadata
+
+
+
+
+
+
+
+Metadata on the usage of the cached content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`total_token_count`
+
+ |
+
+
+`int`
+
+Total number of tokens that the cached
+content consumes.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Candidate.md b/docs/api/google/generativeai/protos/Candidate.md
new file mode 100644
index 000000000..c2db87a92
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Candidate.md
@@ -0,0 +1,186 @@
+
+# google.generativeai.protos.Candidate
+
+
+
+
+
+
+
+A response candidate generated from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`index`
+
+ |
+
+
+`int`
+
+Output only. Index of the candidate in the
+list of response candidates.
+
+
+ |
+
+
+
+`content`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Output only. Generated content returned from
+the model.
+
+ |
+
+
+
+`finish_reason`
+
+ |
+
+
+`google.ai.generativelanguage.Candidate.FinishReason`
+
+Optional. Output only. The reason why the
+model stopped generating tokens.
+If empty, the model has not stopped generating
+tokens.
+
+ |
+
+
+
+`safety_ratings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+List of ratings for the safety of a response
+candidate.
+There is at most one rating per category.
+
+ |
+
+
+
+`citation_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+candidate.
+
+This field may be populated with recitation information for
+any text included in the ``content``. These are passages
+that are "recited" from copyrighted material in the
+foundational LLM's training data.
+
+ |
+
+
+
+`token_count`
+
+ |
+
+
+`int`
+
+Output only. Token count for this candidate.
+
+ |
+
+
+
+`grounding_attributions`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.GroundingAttribution]`
+
+Output only. Attribution information for sources that
+contributed to a grounded answer.
+
+This field is populated for ``GenerateAnswer`` calls.
+
+ |
+
+
+
+`grounding_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.GroundingMetadata`
+
+Output only. Grounding metadata for the candidate.
+
+This field is populated for ``GenerateContent`` calls.
+
+ |
+
+
+
+`avg_logprobs`
+
+ |
+
+
+`float`
+
+Output only. Average log probability score of
+the candidate.
+
+ |
+
+
+
+`logprobs_result`
+
+ |
+
+
+`google.ai.generativelanguage.LogprobsResult`
+
+Output only. Log-likelihood scores for the
+response tokens and top tokens.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class FinishReason`](../../../google/generativeai/protos/Candidate/FinishReason.md)
+
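+As a rough usage sketch (assuming ``response`` is a
+``GenerateContentResponse`` obtained from a ``GenerateContent`` call
+elsewhere), the candidate fields above can be read like this:
+
+```
+from google.generativeai import protos
+
+for candidate in response.candidates:
+    # Only treat the candidate as complete if generation stopped naturally.
+    if candidate.finish_reason == protos.Candidate.FinishReason.STOP:
+        text = "".join(part.text for part in candidate.content.parts)
+        print(candidate.index, text)
+```
+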
diff --git a/docs/api/google/generativeai/protos/Candidate/FinishReason.md b/docs/api/google/generativeai/protos/Candidate/FinishReason.md
new file mode 100644
index 000000000..9067d6ff4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Candidate/FinishReason.md
@@ -0,0 +1,876 @@
+
+# google.generativeai.protos.Candidate.FinishReason
+
+
+
+
+
+
+
+Defines the reason why the model stopped generating tokens.
+
+
+google.generativeai.protos.Candidate.FinishReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`FINISH_REASON_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Default value. This value is unused.
+
+ |
+
+
+
+`STOP`
+
+ |
+
+
+`1`
+
+Natural stop point of the model or provided
+stop sequence.
+
+ |
+
+
+
+`MAX_TOKENS`
+
+ |
+
+
+`2`
+
+The maximum number of tokens as specified in
+the request was reached.
+
+ |
+
+
+
+`SAFETY`
+
+ |
+
+
+`3`
+
+The response candidate content was flagged
+for safety reasons.
+
+ |
+
+
+
+`RECITATION`
+
+ |
+
+
+`4`
+
+The response candidate content was flagged
+for recitation reasons.
+
+ |
+
+
+
+`LANGUAGE`
+
+ |
+
+
+`6`
+
+The response candidate content was flagged
+for using an unsupported language.
+
+ |
+
+
+
+`OTHER`
+
+ |
+
+
+`5`
+
+Unknown reason.
+
+ |
+
+
+
+`BLOCKLIST`
+
+ |
+
+
+`7`
+
+Token generation stopped because the content
+contains forbidden terms.
+
+ |
+
+
+
+`PROHIBITED_CONTENT`
+
+ |
+
+
+`8`
+
+Token generation stopped for potentially
+containing prohibited content.
+
+ |
+
+
+
+`SPII`
+
+ |
+
+
+`9`
+
+Token generation stopped because the content
+potentially contains Sensitive Personally
+Identifiable Information (SPII).
+
+ |
+
+
+
+`MALFORMED_FUNCTION_CALL`
+
+ |
+
+
+`10`
+
+The function call generated by the model is
+invalid.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+BLOCKLIST
+
+ |
+
+
+`<FinishReason.BLOCKLIST: 7>`
+
+ |
+
+
+
+FINISH_REASON_UNSPECIFIED
+
+ |
+
+
+`<FinishReason.FINISH_REASON_UNSPECIFIED: 0>`
+
+ |
+
+
+
+LANGUAGE
+
+ |
+
+
+`<FinishReason.LANGUAGE: 6>`
+
+ |
+
+
+
+MALFORMED_FUNCTION_CALL
+
+ |
+
+
+`<FinishReason.MALFORMED_FUNCTION_CALL: 10>`
+
+ |
+
+
+
+MAX_TOKENS
+
+ |
+
+
+`<FinishReason.MAX_TOKENS: 2>`
+
+ |
+
+
+
+OTHER
+
+ |
+
+
+`<FinishReason.OTHER: 5>`
+
+ |
+
+
+
+PROHIBITED_CONTENT
+
+ |
+
+
+`<FinishReason.PROHIBITED_CONTENT: 8>`
+
+ |
+
+
+
+RECITATION
+
+ |
+
+
+`<FinishReason.RECITATION: 4>`
+
+ |
+
+
+
+SAFETY
+
+ |
+
+
+`<FinishReason.SAFETY: 3>`
+
+ |
+
+
+
+SPII
+
+ |
+
+
+`<FinishReason.SPII: 9>`
+
+ |
+
+
+
+STOP
+
+ |
+
+
+`<FinishReason.STOP: 1>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Chunk.md b/docs/api/google/generativeai/protos/Chunk.md
new file mode 100644
index 000000000..b1fbdb9e6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Chunk.md
@@ -0,0 +1,125 @@
+
+# google.generativeai.protos.Chunk
+
+
+
+
+
+
+
+A ``Chunk`` is a subpart of a ``Document`` that is treated as an independent unit for the purposes of vector representation and storage.
+
+
+ A ``Corpus`` can have a maximum of 1 million ``Chunk``\ s.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Immutable. Identifier. The ``Chunk`` resource name. The ID
+(name excluding the `corpora/*/documents/*/chunks/` prefix)
+can contain up to 40 characters that are lowercase
+alphanumeric or dashes (-). The ID cannot start or end with
+a dash. If the name is empty on create, a random
+12-character unique ID will be generated. Example:
+``corpora/{corpus_id}/documents/{document_id}/chunks/123a456b789c``
+
+ |
+
+
+
+`data`
+
+ |
+
+
+`google.ai.generativelanguage.ChunkData`
+
+Required. The content for the ``Chunk``, such as the text
+string. The maximum number of tokens per chunk is 2043.
+
+ |
+
+
+
+`custom_metadata`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
+
+Optional. User provided custom metadata stored as key-value
+pairs. The maximum number of ``CustomMetadata`` per chunk is
+20.
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Chunk`` was
+created.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Chunk`` was last
+updated.
+
+ |
+
+
+
+`state`
+
+ |
+
+
+`google.ai.generativelanguage.Chunk.State`
+
+Output only. Current state of the ``Chunk``.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/protos/Chunk/State.md)
+
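+A minimal construction sketch, assuming the `google-generativeai` package;
+the metadata key and file name are hypothetical:
+
+```
+from google.generativeai import protos
+
+chunk = protos.Chunk(
+    data=protos.ChunkData(string_value="The quick brown fox jumps over the lazy dog."),
+    custom_metadata=[
+        protos.CustomMetadata(key="source", string_value="notes.txt"),  # hypothetical key/value
+    ],
+)
+```
+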
diff --git a/docs/api/google/generativeai/protos/Chunk/State.md b/docs/api/google/generativeai/protos/Chunk/State.md
new file mode 100644
index 000000000..c825186bf
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Chunk/State.md
@@ -0,0 +1,699 @@
+
+# google.generativeai.protos.Chunk.State
+
+
+
+
+
+
+
+States for the lifecycle of a ``Chunk``.
+
+
+google.generativeai.protos.Chunk.State(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`STATE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is used if the
+state is omitted.
+
+ |
+
+
+
+`STATE_PENDING_PROCESSING`
+
+ |
+
+
+`1`
+
+``Chunk`` is being processed (embedding and vector storage).
+
+ |
+
+
+
+`STATE_ACTIVE`
+
+ |
+
+
+`2`
+
+``Chunk`` is processed and available for querying.
+
+ |
+
+
+
+`STATE_FAILED`
+
+ |
+
+
+`10`
+
+``Chunk`` failed processing.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+STATE_ACTIVE
+
+ |
+
+
+`<State.STATE_ACTIVE: 2>`
+
+ |
+
+
+
+STATE_FAILED
+
+ |
+
+
+`<State.STATE_FAILED: 10>`
+
+ |
+
+
+
+STATE_PENDING_PROCESSING
+
+ |
+
+
+`<State.STATE_PENDING_PROCESSING: 1>`
+
+ |
+
+
+
+STATE_UNSPECIFIED
+
+ |
+
+
+`<State.STATE_UNSPECIFIED: 0>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/ChunkData.md b/docs/api/google/generativeai/protos/ChunkData.md
new file mode 100644
index 000000000..d07af3291
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ChunkData.md
@@ -0,0 +1,49 @@
+
+# google.generativeai.protos.ChunkData
+
+
+
+
+
+
+
+Extracted data that represents the ``Chunk`` content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`string_value`
+
+ |
+
+
+`str`
+
+The ``Chunk`` content as a string. The maximum number of
+tokens per chunk is 2043.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CitationMetadata.md b/docs/api/google/generativeai/protos/CitationMetadata.md
new file mode 100644
index 000000000..ddf7c3a25
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CitationMetadata.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.CitationMetadata
+
+
+
+
+
+
+
+A collection of source attributions for a piece of content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`citation_sources`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CitationSource]`
+
+Citations to sources for a specific response.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CitationSource.md b/docs/api/google/generativeai/protos/CitationSource.md
new file mode 100644
index 000000000..4eb956912
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CitationSource.md
@@ -0,0 +1,98 @@
+
+# google.generativeai.protos.CitationSource
+
+
+
+
+
+
+
+A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`start_index`
+
+ |
+
+
+`int`
+
+Optional. Start of segment of the response
+that is attributed to this source.
+
+Index indicates the start of the segment,
+measured in bytes.
+
+
+ |
+
+
+
+`end_index`
+
+ |
+
+
+`int`
+
+Optional. End of the attributed segment,
+exclusive.
+
+
+ |
+
+
+
+`uri`
+
+ |
+
+
+`str`
+
+Optional. URI that is attributed as a source
+for a portion of the text.
+
+
+ |
+
+
+
+`license_`
+
+ |
+
+
+`str`
+
+Optional. License for the GitHub project that
+is attributed as a source for segment.
+
+License info is required for code citations.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CodeExecution.md b/docs/api/google/generativeai/protos/CodeExecution.md
new file mode 100644
index 000000000..d8a467e21
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecution.md
@@ -0,0 +1,23 @@
+
+# google.generativeai.protos.CodeExecution
+
+
+
+
+
+
+
+Tool that executes code generated by the model, and automatically returns the result to the model.
+
+
+
+See also ``ExecutableCode`` and ``CodeExecutionResult`` which are
+only generated when using this tool.
+
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult.md b/docs/api/google/generativeai/protos/CodeExecutionResult.md
new file mode 100644
index 000000000..a02521651
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecutionResult.md
@@ -0,0 +1,66 @@
+
+# google.generativeai.protos.CodeExecutionResult
+
+
+
+
+
+
+
+Result of executing the ``ExecutableCode``.
+
+
+
+Only generated when using the ``CodeExecution``, and always follows
+a ``part`` containing the ``ExecutableCode``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`outcome`
+
+ |
+
+
+`google.ai.generativelanguage.CodeExecutionResult.Outcome`
+
+Required. Outcome of the code execution.
+
+ |
+
+
+
+`output`
+
+ |
+
+
+`str`
+
+Optional. Contains stdout when code execution
+is successful, stderr or other description
+otherwise.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Outcome`](../../../google/generativeai/protos/CodeExecutionResult/Outcome.md)
+
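+A short sketch of inspecting the outcome, assuming a
+``CodeExecutionResult`` received in a response part; the values here are
+illustrative only:
+
+```
+from google.generativeai import protos
+
+result = protos.CodeExecutionResult(
+    outcome=protos.CodeExecutionResult.Outcome.OUTCOME_OK,
+    output="42\n",
+)
+
+if result.outcome != protos.CodeExecutionResult.Outcome.OUTCOME_OK:
+    # On failure, `output` carries stderr or another description of the error.
+    print("execution failed:", result.output)
+```
+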
diff --git a/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
new file mode 100644
index 000000000..81894cf17
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CodeExecutionResult/Outcome.md
@@ -0,0 +1,702 @@
+
+# google.generativeai.protos.CodeExecutionResult.Outcome
+
+
+
+
+
+
+
+Enumeration of possible outcomes of the code execution.
+
+
+google.generativeai.protos.CodeExecutionResult.Outcome(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`OUTCOME_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Unspecified status. This value should not be
+used.
+
+ |
+
+
+
+`OUTCOME_OK`
+
+ |
+
+
+`1`
+
+Code execution completed successfully.
+
+ |
+
+
+
+`OUTCOME_FAILED`
+
+ |
+
+
+`2`
+
+Code execution finished but with a failure. ``stderr``
+should contain the reason.
+
+ |
+
+
+
+`OUTCOME_DEADLINE_EXCEEDED`
+
+ |
+
+
+`3`
+
+Code execution ran for too long, and was
+cancelled. There may or may not be a partial
+output present.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+OUTCOME_DEADLINE_EXCEEDED
+
+ |
+
+
+`<Outcome.OUTCOME_DEADLINE_EXCEEDED: 3>`
+
+ |
+
+
+
+OUTCOME_FAILED
+
+ |
+
+
+`<Outcome.OUTCOME_FAILED: 2>`
+
+ |
+
+
+
+OUTCOME_OK
+
+ |
+
+
+`<Outcome.OUTCOME_OK: 1>`
+
+ |
+
+
+
+OUTCOME_UNSPECIFIED
+
+ |
+
+
+`<Outcome.OUTCOME_UNSPECIFIED: 0>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Condition.md b/docs/api/google/generativeai/protos/Condition.md
new file mode 100644
index 000000000..64387da7a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Condition.md
@@ -0,0 +1,85 @@
+
+# google.generativeai.protos.Condition
+
+
+
+
+
+
+
+Filter condition applicable to a single key.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`string_value`
+
+ |
+
+
+`str`
+
+The string value to filter the metadata on.
+
+This field is a member of `oneof`_ ``value``.
+
+ |
+
+
+
+`numeric_value`
+
+ |
+
+
+`float`
+
+The numeric value to filter the metadata on.
+
+This field is a member of `oneof`_ ``value``.
+
+ |
+
+
+
+`operation`
+
+ |
+
+
+`google.ai.generativelanguage.Condition.Operator`
+
+Required. Operator applied to the given
+key-value pair to trigger the condition.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Operator`](../../../google/generativeai/protos/Condition/Operator.md)
+
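+A minimal sketch of building a numeric filter condition with the
+``value`` oneof, assuming the `google-generativeai` package; the
+threshold is illustrative:
+
+```
+from google.generativeai import protos
+
+# Matches metadata values greater than or equal to 2020.
+condition = protos.Condition(
+    numeric_value=2020.0,
+    operation=protos.Condition.Operator.GREATER_EQUAL,
+)
+```
+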
diff --git a/docs/api/google/generativeai/protos/Condition/Operator.md b/docs/api/google/generativeai/protos/Condition/Operator.md
new file mode 100644
index 000000000..d77479498
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Condition/Operator.md
@@ -0,0 +1,820 @@
+
+# google.generativeai.protos.Condition.Operator
+
+
+
+
+
+
+
+Defines the valid operators that can be applied to a key-value pair.
+
+
+google.generativeai.protos.Condition.Operator(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`OPERATOR_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is unused.
+
+ |
+
+
+
+`LESS`
+
+ |
+
+
+`1`
+
+Supported by numeric.
+
+ |
+
+
+
+`LESS_EQUAL`
+
+ |
+
+
+`2`
+
+Supported by numeric.
+
+ |
+
+
+
+`EQUAL`
+
+ |
+
+
+`3`
+
+Supported by numeric & string.
+
+ |
+
+
+
+`GREATER_EQUAL`
+
+ |
+
+
+`4`
+
+Supported by numeric.
+
+ |
+
+
+
+`GREATER`
+
+ |
+
+
+`5`
+
+Supported by numeric.
+
+ |
+
+
+
+`NOT_EQUAL`
+
+ |
+
+
+`6`
+
+Supported by numeric & string.
+
+ |
+
+
+
+`INCLUDES`
+
+ |
+
+
+`7`
+
+Supported by string only when ``CustomMetadata`` value type
+for the given key has a ``string_list_value``.
+
+ |
+
+
+
+`EXCLUDES`
+
+ |
+
+
+`8`
+
+Supported by string only when ``CustomMetadata`` value type
+for the given key has a ``string_list_value``.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+EQUAL
+
+ |
+
+
+`<Operator.EQUAL: 3>`
+
+ |
+
+
+
+EXCLUDES
+
+ |
+
+
+`<Operator.EXCLUDES: 8>`
+
+ |
+
+
+
+GREATER
+
+ |
+
+
+`<Operator.GREATER: 5>`
+
+ |
+
+
+
+GREATER_EQUAL
+
+ |
+
+
+`<Operator.GREATER_EQUAL: 4>`
+
+ |
+
+
+
+INCLUDES
+
+ |
+
+
+`<Operator.INCLUDES: 7>`
+
+ |
+
+
+
+LESS
+
+ |
+
+
+`<Operator.LESS: 1>`
+
+ |
+
+
+
+LESS_EQUAL
+
+ |
+
+
+`<Operator.LESS_EQUAL: 2>`
+
+ |
+
+
+
+NOT_EQUAL
+
+ |
+
+
+`<Operator.NOT_EQUAL: 6>`
+
+ |
+
+
+
+OPERATOR_UNSPECIFIED
+
+ |
+
+
+`<Operator.OPERATOR_UNSPECIFIED: 0>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Content.md b/docs/api/google/generativeai/protos/Content.md
new file mode 100644
index 000000000..f4ae11276
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Content.md
@@ -0,0 +1,66 @@
+
+# google.generativeai.protos.Content
+
+
+
+
+
+
+
+The base structured datatype containing multi-part content of a message.
+
+
+
+A ``Content`` includes a ``role`` field designating the producer of
+the ``Content`` and a ``parts`` field containing multi-part data
+that contains the content of the message turn.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parts`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Part]`
+
+Ordered ``Parts`` that constitute a single message. Parts
+may have different MIME types.
+
+ |
+
+
+
+`role`
+
+ |
+
+
+`str`
+
+Optional. The producer of the content. Must
+be either 'user' or 'model'.
+Useful to set for multi-turn conversations,
+otherwise can be left blank or unset.
+
+ |
+
+
+
+
+
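+A minimal construction sketch for a single user turn, assuming the
+`google-generativeai` package:
+
+```
+from google.generativeai import protos
+
+turn = protos.Content(
+    role="user",
+    parts=[protos.Part(text="Hello!")],  # parts may mix MIME types
+)
+```
+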
diff --git a/docs/api/google/generativeai/protos/ContentEmbedding.md b/docs/api/google/generativeai/protos/ContentEmbedding.md
new file mode 100644
index 000000000..b243b5b92
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ContentEmbedding.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.ContentEmbedding
+
+
+
+
+
+
+
+A list of floats representing an embedding.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`values`
+
+ |
+
+
+`MutableSequence[float]`
+
+The embedding values.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ContentFilter.md b/docs/api/google/generativeai/protos/ContentFilter.md
new file mode 100644
index 000000000..a324b43cf
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ContentFilter.md
@@ -0,0 +1,68 @@
+
+# google.generativeai.protos.ContentFilter
+
+
+
+
+
+
+
+Content filtering metadata associated with processing a single request.
+
+
+ContentFilter contains a reason and an optional supporting
+string. The reason may be unspecified.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`reason`
+
+ |
+
+
+`google.ai.generativelanguage.ContentFilter.BlockedReason`
+
+The reason content was blocked during request
+processing.
+
+ |
+
+
+
+`message`
+
+ |
+
+
+`str`
+
+A string that describes the filtering
+behavior in more detail.
+
+
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockedReason`](../../../google/generativeai/types/BlockedReason.md)
+
diff --git a/docs/api/google/generativeai/protos/Corpus.md b/docs/api/google/generativeai/protos/Corpus.md
new file mode 100644
index 000000000..0022cce5c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Corpus.md
@@ -0,0 +1,97 @@
+
+# google.generativeai.protos.Corpus
+
+
+
+
+
+
+
+A ``Corpus`` is a collection of ``Document``\ s.
+
+
+ A project can
+create up to 5 corpora.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Immutable. Identifier. The ``Corpus`` resource name. The ID
+(name excluding the "corpora/" prefix) can contain up to 40
+characters that are lowercase alphanumeric or dashes (-).
+The ID cannot start or end with a dash. If the name is empty
+on create, a unique name will be derived from
+``display_name`` along with a 12 character random suffix.
+Example: ``corpora/my-awesome-corpora-123a456b789c``
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+Optional. The human-readable display name for the
+``Corpus``. The display name must be no more than 512
+characters in length, including spaces. Example: "Docs on
+Semantic Retriever".
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Corpus`` was
+created.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Corpus`` was last
+updated.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensRequest.md b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
new file mode 100644
index 000000000..548a8b60f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountMessageTokensRequest.md
@@ -0,0 +1,68 @@
+
+# google.generativeai.protos.CountMessageTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+`prompt`
+
+ |
+
+
+`google.ai.generativelanguage.MessagePrompt`
+
+Required. The prompt, whose token count is to
+be returned.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountMessageTokensResponse.md b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
new file mode 100644
index 000000000..d7a45c871
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountMessageTokensResponse.md
@@ -0,0 +1,50 @@
+
+# google.generativeai.protos.CountMessageTokensResponse
+
+
+
+
+
+
+
+A response from ``CountMessageTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`token_count`
+
+ |
+
+
+`int`
+
+The number of tokens that the ``model`` tokenizes the
+``prompt`` into.
+
+Always non-negative.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTextTokensRequest.md b/docs/api/google/generativeai/protos/CountTextTokensRequest.md
new file mode 100644
index 000000000..f208e45c5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTextTokensRequest.md
@@ -0,0 +1,68 @@
+
+# google.generativeai.protos.CountTextTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+`prompt`
+
+ |
+
+
+`google.ai.generativelanguage.TextPrompt`
+
+Required. The free-form input text given to
+the model as a prompt.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTextTokensResponse.md b/docs/api/google/generativeai/protos/CountTextTokensResponse.md
new file mode 100644
index 000000000..a3f36b0e5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTextTokensResponse.md
@@ -0,0 +1,50 @@
+
+# google.generativeai.protos.CountTextTokensResponse
+
+
+
+
+
+
+
+A response from ``CountTextTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`token_count`
+
+ |
+
+
+`int`
+
+The number of tokens that the ``model`` tokenizes the
+``prompt`` into.
+
+Always non-negative.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CountTokensRequest.md b/docs/api/google/generativeai/protos/CountTokensRequest.md
new file mode 100644
index 000000000..df49484e4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTokensRequest.md
@@ -0,0 +1,90 @@
+
+# google.generativeai.protos.CountTokensRequest
+
+
+
+
+
+
+
+Counts the number of tokens in the ``prompt`` sent to a model.
+
+
+
+Models may tokenize text differently, so each model may return a
+different ``token_count``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+`contents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Optional. The input given to the model as a prompt. This
+field is ignored when ``generate_content_request`` is set.
+
+ |
+
+
+
+`generate_content_request`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateContentRequest`
+
+Optional. The overall input given to the ``Model``. This
+includes the prompt as well as other model steering
+information like system instructions, and/or
+function declarations for function calling.
+``Model``\ s/\ ``Content``\ s and
+``generate_content_request``\ s are mutually exclusive. You
+can either send ``Model`` + ``Content``\ s or a
+``generate_content_request``, but never both.
+
+ |
+
+
+
+
+
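+A minimal request sketch, assuming the `google-generativeai` package;
+the model name is hypothetical. Send either ``contents`` or
+``generate_content_request``, never both:
+
+```
+from google.generativeai import protos
+
+request = protos.CountTokensRequest(
+    model="models/gemini-1.5-flash",  # hypothetical model name
+    contents=[protos.Content(role="user", parts=[protos.Part(text="Hi there")])],
+)
+```
+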
diff --git a/docs/api/google/generativeai/protos/CountTokensResponse.md b/docs/api/google/generativeai/protos/CountTokensResponse.md
new file mode 100644
index 000000000..a17b73761
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CountTokensResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.CountTokensResponse
+
+
+
+
+
+
+
+A response from ``CountTokens``.
+
+
+
+It returns the model's ``token_count`` for the ``prompt``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`total_tokens`
+
+ |
+
+
+`int`
+
+The number of tokens that the ``Model`` tokenizes the
+``prompt`` into. Always non-negative.
+
+ |
+
+
+
+`cached_content_token_count`
+
+ |
+
+
+`int`
+
+Number of tokens in the cached part of the
+prompt (the cached content).
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateCachedContentRequest.md b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
new file mode 100644
index 000000000..4b83d5d77
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateCachedContentRequest.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.CreateCachedContentRequest
+
+
+
+
+
+
+
+Request to create CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`cached_content`
+
+ |
+
+
+`google.ai.generativelanguage.CachedContent`
+
+Required. The cached content to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateChunkRequest.md b/docs/api/google/generativeai/protos/CreateChunkRequest.md
new file mode 100644
index 000000000..3ab8abffa
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateChunkRequest.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.CreateChunkRequest
+
+
+
+
+
+
+
+Request to create a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Document`` where this ``Chunk``
+will be created. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`chunk`
+
+ |
+
+
+`google.ai.generativelanguage.Chunk`
+
+Required. The ``Chunk`` to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateCorpusRequest.md b/docs/api/google/generativeai/protos/CreateCorpusRequest.md
new file mode 100644
index 000000000..5eeb5eb4e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateCorpusRequest.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.CreateCorpusRequest
+
+
+
+
+
+
+
+Request to create a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`corpus`
+
+ |
+
+
+`google.ai.generativelanguage.Corpus`
+
+Required. The ``Corpus`` to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateDocumentRequest.md b/docs/api/google/generativeai/protos/CreateDocumentRequest.md
new file mode 100644
index 000000000..43def7bc5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateDocumentRequest.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.CreateDocumentRequest
+
+
+
+
+
+
+
+Request to create a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Corpus`` where this ``Document``
+will be created. Example: ``corpora/my-corpus-123``
+
+ |
+
+
+
+`document`
+
+ |
+
+
+`google.ai.generativelanguage.Document`
+
+Required. The ``Document`` to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateFileRequest.md b/docs/api/google/generativeai/protos/CreateFileRequest.md
new file mode 100644
index 000000000..0b3d1b297
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateFileRequest.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.CreateFileRequest
+
+
+
+
+
+
+
+Request for ``CreateFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`file`
+
+ |
+
+
+`google.ai.generativelanguage.File`
+
+Optional. Metadata for the file to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateFileResponse.md b/docs/api/google/generativeai/protos/CreateFileResponse.md
new file mode 100644
index 000000000..2c4f591ae
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateFileResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.CreateFileResponse
+
+
+
+
+
+
+
+Response for ``CreateFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`file`
+
+ |
+
+
+`google.ai.generativelanguage.File`
+
+Metadata for the created file.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreatePermissionRequest.md b/docs/api/google/generativeai/protos/CreatePermissionRequest.md
new file mode 100644
index 000000000..c6b2bd653
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreatePermissionRequest.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.CreatePermissionRequest
+
+
+
+
+
+
+
+Request to create a ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The parent resource of the ``Permission``.
+Formats: ``tunedModels/{tuned_model}`` ``corpora/{corpus}``
+
+ |
+
+
+
+`permission`
+
+ |
+
+
+`google.ai.generativelanguage.Permission`
+
+Required. The permission to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
new file mode 100644
index 000000000..89e69b822
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateTunedModelMetadata.md
@@ -0,0 +1,100 @@
+
+# google.generativeai.protos.CreateTunedModelMetadata
+
+
+
+
+
+
+
+Metadata about the state and progress of creating a tuned model, returned from the long-running operation.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_model`
+
+ |
+
+
+`str`
+
+Name of the tuned model associated with the
+tuning operation.
+
+ |
+
+
+
+`total_steps`
+
+ |
+
+
+`int`
+
+The total number of tuning steps.
+
+ |
+
+
+
+`completed_steps`
+
+ |
+
+
+`int`
+
+The number of steps completed.
+
+ |
+
+
+
+`completed_percent`
+
+ |
+
+
+`float`
+
+The completed percentage for the tuning
+operation.
+
+ |
+
+
+
+`snapshots`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
+
+Metrics collected during tuning.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CreateTunedModelRequest.md b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
new file mode 100644
index 000000000..198579199
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CreateTunedModelRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.CreateTunedModelRequest
+
+
+
+
+
+
+
+Request to create a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_model_id`
+
+ |
+
+
+`str`
+
+Optional. The unique ID for the tuned model, if specified.
+This value should be up to 40 characters; the first
+character must be a letter, and the last may be a letter or a
+number. The ID must match the regular expression:
+``[a-z]([a-z0-9-]{0,38}[a-z0-9])?``.
+
+
+ |
+
+
+
+`tuned_model`
+
+ |
+
+
+`google.ai.generativelanguage.TunedModel`
+
+Required. The tuned model to create.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/CustomMetadata.md b/docs/api/google/generativeai/protos/CustomMetadata.md
new file mode 100644
index 000000000..1c9426b2f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/CustomMetadata.md
@@ -0,0 +1,97 @@
+
+# google.generativeai.protos.CustomMetadata
+
+
+
+
+
+
+
+User provided metadata stored as key-value pairs.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`string_value`
+
+ |
+
+
+`str`
+
+The string value of the metadata to store.
+
+This field is a member of `oneof`_ ``value``.
+
+ |
+
+
+
+`string_list_value`
+
+ |
+
+
+`google.ai.generativelanguage.StringList`
+
+The StringList value of the metadata to
+store.
+
+This field is a member of `oneof`_ ``value``.
+
+ |
+
+
+
+`numeric_value`
+
+ |
+
+
+`float`
+
+The numeric value of the metadata to store.
+
+This field is a member of `oneof`_ ``value``.
+
+ |
+
+
+
+`key`
+
+ |
+
+
+`str`
+
+Required. The key of the metadata to store.
+
+ |
+
+
+
+
+
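+A minimal sketch showing the ``value`` oneof with a string list, assuming
+the `google-generativeai` package; the key and values are illustrative:
+
+```
+from google.generativeai import protos
+
+meta = protos.CustomMetadata(
+    key="tags",
+    string_list_value=protos.StringList(values=["news", "2024"]),
+)
+```
+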
diff --git a/docs/api/google/generativeai/protos/Dataset.md b/docs/api/google/generativeai/protos/Dataset.md
new file mode 100644
index 000000000..32cc28cf1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Dataset.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.Dataset
+
+
+
+
+
+
+
+Dataset for training or validation.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`examples`
+
+ |
+
+
+`google.ai.generativelanguage.TuningExamples`
+
+Optional. Inline examples.
+
+This field is a member of `oneof`_ ``dataset``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
new file mode 100644
index 000000000..184707486
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteCachedContentRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.DeleteCachedContentRequest
+
+
+
+
+
+
+
+Request to delete CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name referring to the content cache
+entry Format: ``cachedContents/{id}``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteChunkRequest.md b/docs/api/google/generativeai/protos/DeleteChunkRequest.md
new file mode 100644
index 000000000..514a124c5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteChunkRequest.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.DeleteChunkRequest
+
+
+
+
+
+
+
+Request to delete a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the ``Chunk`` to delete.
+Example:
+``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteCorpusRequest.md b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
new file mode 100644
index 000000000..11cf262f5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteCorpusRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.DeleteCorpusRequest
+
+
+
+
+
+
+
+Request to delete a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the ``Corpus``. Example:
+``corpora/my-corpus-123``
+
+ |
+
+
+
+`force`
+
+ |
+
+
+`bool`
+
+Optional. If set to true, any ``Document``\ s and objects
+related to this ``Corpus`` will also be deleted.
+
+If false (the default), a ``FAILED_PRECONDITION`` error will
+be returned if ``Corpus`` contains any ``Document``\ s.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteDocumentRequest.md b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
new file mode 100644
index 000000000..698944b2f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteDocumentRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.DeleteDocumentRequest
+
+
+
+
+
+
+
+Request to delete a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the ``Document`` to delete.
+Example: ``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`force`
+
+ |
+
+
+`bool`
+
+Optional. If set to true, any ``Chunk``\ s and objects
+related to this ``Document`` will also be deleted.
+
+If false (the default), a ``FAILED_PRECONDITION`` error will
+be returned if ``Document`` contains any ``Chunk``\ s.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteFileRequest.md b/docs/api/google/generativeai/protos/DeleteFileRequest.md
new file mode 100644
index 000000000..e0c28ae36
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteFileRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.DeleteFileRequest
+
+
+
+
+
+
+
+Request for ``DeleteFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``File`` to delete. Example:
+``files/abc-123``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeletePermissionRequest.md b/docs/api/google/generativeai/protos/DeletePermissionRequest.md
new file mode 100644
index 000000000..de773d62b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeletePermissionRequest.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.DeletePermissionRequest
+
+
+
+
+
+
+
+Request to delete the ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the permission. Formats:
+``tunedModels/{tuned_model}/permissions/{permission}``
+``corpora/{corpus}/permissions/{permission}``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
new file mode 100644
index 000000000..f335c1163
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DeleteTunedModelRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.DeleteTunedModelRequest
+
+
+
+
+
+
+
+Request to delete a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the model. Format:
+``tunedModels/my-model-id``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Document.md b/docs/api/google/generativeai/protos/Document.md
new file mode 100644
index 000000000..96be59407
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Document.md
@@ -0,0 +1,113 @@
+
+# google.generativeai.protos.Document
+
+
+
+
+
+
+
+A ``Document`` is a collection of ``Chunk``\ s.
+
+
+ A ``Corpus`` can
+have a maximum of 10,000 ``Document``\ s.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Immutable. Identifier. The ``Document`` resource name. The
+ID (name excluding the `corpora/*/documents/` prefix) can
+contain up to 40 characters that are lowercase alphanumeric
+or dashes (-). The ID cannot start or end with a dash. If
+the name is empty on create, a unique name will be derived
+from ``display_name`` along with a 12 character random
+suffix. Example:
+``corpora/{corpus_id}/documents/my-awesome-doc-123a456b789c``
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+Optional. The human-readable display name for the
+``Document``. The display name must be no more than 512
+characters in length, including spaces. Example: "Semantic
+Retriever Documentation".
+
+ |
+
+
+
+`custom_metadata`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CustomMetadata]`
+
+Optional. User provided custom metadata stored as key-value
+pairs used for querying. A ``Document`` can have a maximum
+of 20 ``CustomMetadata``.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Document`` was last
+updated.
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The Timestamp of when the ``Document`` was
+created.
+
+ |
+
+
+
+
+
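+An illustrative sketch of building a `Document` message with the fields above (the display name and metadata values are placeholders):
+
+```
+from google.generativeai import protos
+
+doc = protos.Document(
+    display_name="Semantic Retriever Documentation",
+    custom_metadata=[
+        # CustomMetadata holds key-value pairs used for querying.
+        protos.CustomMetadata(key="genre", string_value="reference"),
+    ],
+)
+```
+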
diff --git a/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md b/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md
new file mode 100644
index 000000000..5e7b3405b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DynamicRetrievalConfig.md
@@ -0,0 +1,66 @@
+
+# google.generativeai.protos.DynamicRetrievalConfig
+
+
+
+
+
+
+
+Describes the options to customize dynamic retrieval.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`mode`
+
+ |
+
+
+`google.ai.generativelanguage.DynamicRetrievalConfig.Mode`
+
+The mode of the predictor to be used in
+dynamic retrieval.
+
+ |
+
+
+
+`dynamic_threshold`
+
+ |
+
+
+`float`
+
+The threshold to be used in dynamic
+retrieval. If not set, a system default value is
+used.
+
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Mode`](../../../google/generativeai/protos/DynamicRetrievalConfig/Mode.md)
+
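+A minimal sketch of constructing this config (the threshold is illustrative; a system default applies when it is left unset):
+
+```
+from google.generativeai import protos
+
+config = protos.DynamicRetrievalConfig(
+    mode=protos.DynamicRetrievalConfig.Mode.MODE_DYNAMIC,
+    dynamic_threshold=0.7,
+)
+```
+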
diff --git a/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md b/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md
new file mode 100644
index 000000000..fcb8af8ef
--- /dev/null
+++ b/docs/api/google/generativeai/protos/DynamicRetrievalConfig/Mode.md
@@ -0,0 +1,651 @@
+
+# google.generativeai.protos.DynamicRetrievalConfig.Mode
+
+
+
+
+
+
+
+The mode of the predictor to be used in dynamic retrieval.
+
+
+google.generativeai.protos.DynamicRetrievalConfig.Mode(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`MODE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Always trigger retrieval.
+
+ |
+
+
+
+`MODE_DYNAMIC`
+
+ |
+
+
+`1`
+
+Run retrieval only when the system decides
+it is necessary.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+MODE_DYNAMIC
+
+ |
+
+
+``
+
+ |
+
+
+
+MODE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedContentRequest.md b/docs/api/google/generativeai/protos/EmbedContentRequest.md
new file mode 100644
index 000000000..f219f20d1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedContentRequest.md
@@ -0,0 +1,117 @@
+
+# google.generativeai.protos.EmbedContentRequest
+
+
+
+
+
+
+
+Request containing the ``Content`` for the model to embed.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model's resource name. This serves as an ID
+for the Model to use.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+`content`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Required. The content to embed. Only the ``parts.text``
+fields will be counted.
+
+ |
+
+
+
+`task_type`
+
+ |
+
+
+`google.ai.generativelanguage.TaskType`
+
+Optional. The task type for which the embeddings will
+be used. Can only be set for ``models/embedding-001``.
+
+
+ |
+
+
+
+`title`
+
+ |
+
+
+`str`
+
+Optional. A title for the text. Only applicable
+when TaskType is ``RETRIEVAL_DOCUMENT``.
+
+Note: Specifying a ``title`` for ``RETRIEVAL_DOCUMENT``
+provides better quality embeddings for retrieval.
+
+
+ |
+
+
+
+`output_dimensionality`
+
+ |
+
+
+`int`
+
+Optional. Reduced dimension for the output
+embedding. If set, excessive values in the output embedding
+are truncated from the end. Supported by newer models since
+2024 only. You cannot set this value if using the earlier
+model (``models/embedding-001``).
+
+
+ |
+
+
+
+
+
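+The SDK's `embed_content` helper builds and sends this request; a hedged sketch using the fields described above (the content and title strings are placeholders):
+
+```
+import google.generativeai as genai
+
+result = genai.embed_content(
+    model="models/embedding-001",
+    content="What is the meaning of life?",
+    task_type="retrieval_document",
+    title="A short philosophy question",
+)
+print(len(result["embedding"]))  # length of the returned embedding vector
+```
+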
diff --git a/docs/api/google/generativeai/protos/EmbedContentResponse.md b/docs/api/google/generativeai/protos/EmbedContentResponse.md
new file mode 100644
index 000000000..79b728d3c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedContentResponse.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.EmbedContentResponse
+
+
+
+
+
+
+
+The response to an ``EmbedContentRequest``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`embedding`
+
+ |
+
+
+`google.ai.generativelanguage.ContentEmbedding`
+
+Output only. The embedding generated from the
+input content.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedTextRequest.md b/docs/api/google/generativeai/protos/EmbedTextRequest.md
new file mode 100644
index 000000000..d390fb154
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedTextRequest.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.EmbedTextRequest
+
+
+
+
+
+
+
+Request to get a text embedding from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The model name to use with the
+format model=models/{model}.
+
+ |
+
+
+
+`text`
+
+ |
+
+
+`str`
+
+Optional. The free-form input text that the
+model will turn into an embedding.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/EmbedTextResponse.md b/docs/api/google/generativeai/protos/EmbedTextResponse.md
new file mode 100644
index 000000000..d4f47d38b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/EmbedTextResponse.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.EmbedTextResponse
+
+
+
+
+
+
+
+The response to a EmbedTextRequest.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`embedding`
+
+ |
+
+
+`google.ai.generativelanguage.Embedding`
+
+Output only. The embedding generated from the
+input text.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Embedding.md b/docs/api/google/generativeai/protos/Embedding.md
new file mode 100644
index 000000000..b44723b60
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Embedding.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.Embedding
+
+
+
+
+
+
+
+A list of floats representing the embedding.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`value`
+
+ |
+
+
+`MutableSequence[float]`
+
+The embedding values.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Example.md b/docs/api/google/generativeai/protos/Example.md
new file mode 100644
index 000000000..0707655fe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Example.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.Example
+
+
+
+
+
+
+
+An input/output example used to instruct the Model.
+
+
+
+It demonstrates how the model should respond or format its
+response.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`input`
+
+ |
+
+
+`google.ai.generativelanguage.Message`
+
+Required. An example of an input ``Message`` from the user.
+
+ |
+
+
+
+`output`
+
+ |
+
+
+`google.ai.generativelanguage.Message`
+
+Required. An example of what the model should
+output given the input.
+
+ |
+
+
+
+
+
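+A minimal sketch of one such input/output pair (both messages are placeholders):
+
+```
+from google.generativeai import protos
+
+example = protos.Example(
+    input=protos.Message(content="What is the capital of France?"),
+    output=protos.Message(content="The capital of France is Paris."),
+)
+```
+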
diff --git a/docs/api/google/generativeai/protos/ExecutableCode.md b/docs/api/google/generativeai/protos/ExecutableCode.md
new file mode 100644
index 000000000..d486ec720
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ExecutableCode.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.protos.ExecutableCode
+
+
+
+
+
+
+
+Code generated by the model that is meant to be executed, and the result returned to the model.
+
+
+
+Only generated when using the ``CodeExecution`` tool, in which the
+code will be automatically executed, and a corresponding
+``CodeExecutionResult`` will also be generated.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`language`
+
+ |
+
+
+`google.ai.generativelanguage.ExecutableCode.Language`
+
+Required. Programming language of the ``code``.
+
+ |
+
+
+
+`code`
+
+ |
+
+
+`str`
+
+Required. The code to be executed.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Language`](../../../google/generativeai/protos/ExecutableCode/Language.md)
+
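+`ExecutableCode` parts show up in responses when the code execution tool is enabled; a hedged sketch (the model name and prompt are illustrative):
+
+```
+import google.generativeai as genai
+
+model = genai.GenerativeModel("gemini-1.5-flash", tools="code_execution")
+response = model.generate_content(
+    "Write and run Python code to sum the first 20 primes."
+)
+print(response.text)  # includes the generated code and its execution result
+```
+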
diff --git a/docs/api/google/generativeai/protos/ExecutableCode/Language.md b/docs/api/google/generativeai/protos/ExecutableCode/Language.md
new file mode 100644
index 000000000..a880d8366
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ExecutableCode/Language.md
@@ -0,0 +1,652 @@
+
+# google.generativeai.protos.ExecutableCode.Language
+
+
+
+
+
+
+
+Supported programming languages for the generated code.
+
+
+google.generativeai.protos.ExecutableCode.Language(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`LANGUAGE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Unspecified language. This value should not
+be used.
+
+ |
+
+
+
+`PYTHON`
+
+ |
+
+
+`1`
+
+Python >= 3.10, with numpy and sympy
+available.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+LANGUAGE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+PYTHON
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/File.md b/docs/api/google/generativeai/protos/File.md
new file mode 100644
index 000000000..8466c3ad0
--- /dev/null
+++ b/docs/api/google/generativeai/protos/File.md
@@ -0,0 +1,205 @@
+
+# google.generativeai.protos.File
+
+
+
+
+
+
+
+A file uploaded to the API.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`video_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.VideoMetadata`
+
+Output only. Metadata for a video.
+
+This field is a member of `oneof`_ ``metadata``.
+
+ |
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Immutable. Identifier. The ``File`` resource name. The ID
+(name excluding the "files/" prefix) can contain up to 40
+characters that are lowercase alphanumeric or dashes (-).
+The ID cannot start or end with a dash. If the name is empty
+on create, a unique name will be generated. Example:
+``files/123-456``
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+Optional. The human-readable display name for the ``File``.
+The display name must be no more than 512 characters in
+length, including spaces. Example: "Welcome Image".
+
+ |
+
+
+
+`mime_type`
+
+ |
+
+
+`str`
+
+Output only. MIME type of the file.
+
+ |
+
+
+
+`size_bytes`
+
+ |
+
+
+`int`
+
+Output only. Size of the file in bytes.
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` was created.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` was last
+updated.
+
+ |
+
+
+
+`expiration_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp of when the ``File`` will be
+deleted. Only set if the ``File`` is scheduled to expire.
+
+ |
+
+
+
+`sha256_hash`
+
+ |
+
+
+`bytes`
+
+Output only. SHA-256 hash of the uploaded
+bytes.
+
+ |
+
+
+
+`uri`
+
+ |
+
+
+`str`
+
+Output only. The uri of the ``File``.
+
+ |
+
+
+
+`state`
+
+ |
+
+
+`google.ai.generativelanguage.File.State`
+
+Output only. Processing state of the File.
+
+ |
+
+
+
+`error`
+
+ |
+
+
+`google.rpc.status_pb2.Status`
+
+Output only. Error status if File processing
+failed.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/protos/File/State.md)
+
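+`File` resources are normally created through the upload helper; a short sketch (the path and display name are placeholders):
+
+```
+import google.generativeai as genai
+
+sample = genai.upload_file(path="welcome.jpg", display_name="Welcome Image")
+# e.g. "files/abc-123", its URI, and its processing State
+print(sample.name, sample.uri, sample.state)
+```
+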
diff --git a/docs/api/google/generativeai/protos/File/State.md b/docs/api/google/generativeai/protos/File/State.md
new file mode 100644
index 000000000..c8e3835ac
--- /dev/null
+++ b/docs/api/google/generativeai/protos/File/State.md
@@ -0,0 +1,701 @@
+
+# google.generativeai.protos.File.State
+
+
+
+
+
+
+
+States for the lifecycle of a File.
+
+
+google.generativeai.protos.File.State(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`STATE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is used if the
+state is omitted.
+
+ |
+
+
+
+`PROCESSING`
+
+ |
+
+
+`1`
+
+File is being processed and cannot be used
+for inference yet.
+
+ |
+
+
+
+`ACTIVE`
+
+ |
+
+
+`2`
+
+File is processed and available for
+inference.
+
+ |
+
+
+
+`FAILED`
+
+ |
+
+
+`10`
+
+File failed processing.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+ACTIVE
+
+ |
+
+
+``
+
+ |
+
+
+
+FAILED
+
+ |
+
+
+``
+
+ |
+
+
+
+PROCESSING
+
+ |
+
+
+``
+
+ |
+
+
+
+STATE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/FileData.md b/docs/api/google/generativeai/protos/FileData.md
new file mode 100644
index 000000000..7676fa1f5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FileData.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.FileData
+
+
+
+
+
+
+
+URI based data.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`mime_type`
+
+ |
+
+
+`str`
+
+Optional. The IANA standard MIME type of the
+source data.
+
+ |
+
+
+
+`file_uri`
+
+ |
+
+
+`str`
+
+Required. URI.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionCall.md b/docs/api/google/generativeai/protos/FunctionCall.md
new file mode 100644
index 000000000..5d95ec884
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCall.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.FunctionCall
+
+
+
+
+
+
+
+A predicted ``FunctionCall`` returned from the model that contains a string representing the FunctionDeclaration.name with the arguments and their values.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the function to call.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+
+ |
+
+
+
+`args`
+
+ |
+
+
+`google.protobuf.struct_pb2.Struct`
+
+Optional. The function parameters and values
+in JSON object format.
+
+
+ |
+
+
+
+
+
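+A hedged sketch of reading a predicted `FunctionCall` back out of a response (assumes `response` came from a `generate_content` call made with tools):
+
+```
+part = response.candidates[0].content.parts[0]
+if part.function_call:
+    fc = part.function_call
+    # fc.args is a Struct; it supports dict-style access.
+    print(fc.name, dict(fc.args))
+```
+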
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig.md b/docs/api/google/generativeai/protos/FunctionCallingConfig.md
new file mode 100644
index 000000000..c0c0657f3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCallingConfig.md
@@ -0,0 +1,70 @@
+
+# google.generativeai.protos.FunctionCallingConfig
+
+
+
+
+
+
+
+Configuration for specifying function calling behavior.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`mode`
+
+ |
+
+
+`google.ai.generativelanguage.FunctionCallingConfig.Mode`
+
+Optional. Specifies the mode in which
+function calling should execute. If unspecified,
+the default value will be set to AUTO.
+
+ |
+
+
+
+`allowed_function_names`
+
+ |
+
+
+`MutableSequence[str]`
+
+Optional. A set of function names that, when provided,
+limits the functions the model will call.
+
+This should only be set when the Mode is ANY. Function names
+should match [FunctionDeclaration.name]. With mode set to
+ANY, the model will predict a function call from the set of
+function names provided.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Mode`](../../../google/generativeai/protos/FunctionCallingConfig/Mode.md)
+
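+A minimal sketch of constraining the model to a single named function (the function name is a placeholder):
+
+```
+from google.generativeai import protos
+
+config = protos.FunctionCallingConfig(
+    mode=protos.FunctionCallingConfig.Mode.ANY,
+    allowed_function_names=["find_theaters"],
+)
+```
+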
diff --git a/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
new file mode 100644
index 000000000..0ba361f5c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionCallingConfig/Mode.md
@@ -0,0 +1,707 @@
+
+# google.generativeai.protos.FunctionCallingConfig.Mode
+
+
+
+
+
+
+
+Defines the execution behavior for function calling by specifying the execution mode.
+
+
+google.generativeai.protos.FunctionCallingConfig.Mode(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`MODE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Unspecified function calling mode. This value
+should not be used.
+
+ |
+
+
+
+`AUTO`
+
+ |
+
+
+`1`
+
+Default model behavior, model decides to
+predict either a function call or a natural
+language response.
+
+ |
+
+
+
+`ANY`
+
+ |
+
+
+`2`
+
+Model is constrained to always predict a function call. If
+"allowed_function_names" is set, the predicted function call
+will be limited to one of "allowed_function_names";
+otherwise it will be one of the provided
+"function_declarations".
+
+ |
+
+
+
+`NONE`
+
+ |
+
+
+`3`
+
+Model will not predict any function call.
+Model behavior is the same as when no
+function declarations are passed.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+ANY
+
+ |
+
+
+``
+
+ |
+
+
+
+AUTO
+
+ |
+
+
+``
+
+ |
+
+
+
+MODE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+NONE
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/FunctionDeclaration.md b/docs/api/google/generativeai/protos/FunctionDeclaration.md
new file mode 100644
index 000000000..fb6ba658b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionDeclaration.md
@@ -0,0 +1,86 @@
+
+# google.generativeai.protos.FunctionDeclaration
+
+
+
+
+
+
+
+Structured representation of a function declaration as defined by the `OpenAPI 3.03 specification `__.
+
+
+ Included in
+this declaration are the function name and parameters. This
+FunctionDeclaration is a representation of a block of code that can
+be used as a ``Tool`` by the model and executed by the client.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the function.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+
+ |
+
+
+
+`description`
+
+ |
+
+
+`str`
+
+Required. A brief description of the
+function.
+
+ |
+
+
+
+`parameters`
+
+ |
+
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Describes the parameters to this
+function. Reflects the Open API 3.03 Parameter
+Object string Key: the name of the parameter.
+Parameter names are case sensitive. Schema
+Value: the Schema defining the type used for the
+parameter.
+
+
+ |
+
+
+
+
+
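+A hedged sketch of a declaration with an OpenAPI-style parameter schema (all names are illustrative):
+
+```
+from google.generativeai import protos
+
+get_weather = protos.FunctionDeclaration(
+    name="get_current_weather",
+    description="Returns the current weather in a given city.",
+    parameters=protos.Schema(
+        type=protos.Type.OBJECT,
+        properties={"city": protos.Schema(type=protos.Type.STRING)},
+        required=["city"],
+    ),
+)
+```
+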
diff --git a/docs/api/google/generativeai/protos/FunctionResponse.md b/docs/api/google/generativeai/protos/FunctionResponse.md
new file mode 100644
index 000000000..277825166
--- /dev/null
+++ b/docs/api/google/generativeai/protos/FunctionResponse.md
@@ -0,0 +1,63 @@
+
+# google.generativeai.protos.FunctionResponse
+
+
+
+
+
+
+
+The result output from a ``FunctionCall`` that contains a string representing the FunctionDeclaration.name and a structured JSON object containing any output from the function is used as context to the model.
+
+
+ This should contain the result of a\ ``FunctionCall``
+made based on model prediction.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the function to call.
+Must be a-z, A-Z, 0-9, or contain underscores
+and dashes, with a maximum length of 63.
+
+ |
+
+
+
+`response`
+
+ |
+
+
+`google.protobuf.struct_pb2.Struct`
+
+Required. The function response in JSON
+object format.
+
+ |
+
+
+
+
+
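+A minimal sketch of packaging a function's result to send back to the model (the keys and values are placeholders; the dict is converted to a Struct):
+
+```
+from google.generativeai import protos
+
+fn_response = protos.FunctionResponse(
+    name="get_current_weather",
+    response={"temperature_c": 21, "condition": "sunny"},
+)
+```
+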
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
new file mode 100644
index 000000000..24a43ce53
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest.md
@@ -0,0 +1,177 @@
+
+# google.generativeai.protos.GenerateAnswerRequest
+
+
+
+
+
+
+
+Request to generate a grounded answer from the ``Model``.
+
+
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`inline_passages`
+
+ |
+
+
+`google.ai.generativelanguage.GroundingPassages`
+
+Passages provided inline with the request.
+
+This field is a member of `oneof`_ ``grounding_source``.
+
+ |
+
+
+
+`semantic_retriever`
+
+ |
+
+
+`google.ai.generativelanguage.SemanticRetrieverConfig`
+
+Content retrieved from resources created via
+the Semantic Retriever API.
+
+This field is a member of `oneof`_ ``grounding_source``.
+
+ |
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the grounded response.
+
+Format: ``model=models/{model}``.
+
+ |
+
+
+
+`contents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Required. The content of the current conversation with the
+``Model``. For single-turn queries, this is a single
+question to answer. For multi-turn queries, this is a
+repeated field that contains conversation history and the
+last ``Content`` in the list containing the question.
+
+Note: ``GenerateAnswer`` only supports queries in English.
+
+ |
+
+
+
+`answer_style`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
+
+Required. Style in which answers should be
+returned.
+
+ |
+
+
+
+`safety_settings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+This will be enforced on the
+GenerateAnswerRequest.contents and
+``GenerateAnswerResponse.candidate``. There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any contents and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_HATE_SPEECH,
+HARM_CATEGORY_SEXUALLY_EXPLICIT,
+HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT
+are supported. Refer to the
+`guide `__
+for detailed information on available safety settings. Also
+refer to the `Safety
+guidance `__
+to learn how to incorporate safety considerations in your AI
+applications.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model. A low
+temperature (~0.2) is usually recommended for
+Attributed-Question-Answering use cases.
+
+
+ |
+
+
+
+
+
+## Child Classes
+[`class AnswerStyle`](../../../google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md)
+
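+A hedged sketch of a request grounded on inline passages (the model name, passage id, and text are illustrative):
+
+```
+from google.generativeai import protos
+
+request = protos.GenerateAnswerRequest(
+    model="models/aqa",
+    contents=[protos.Content(parts=[protos.Part(text="Why is the sky blue?")])],
+    answer_style=protos.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
+    inline_passages=protos.GroundingPassages(
+        passages=[
+            protos.GroundingPassage(
+                id="p1",
+                content=protos.Content(
+                    parts=[protos.Part(text="Rayleigh scattering of sunlight.")]
+                ),
+            )
+        ]
+    ),
+)
+```
+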
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
new file mode 100644
index 000000000..4f1b53ae1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerRequest/AnswerStyle.md
@@ -0,0 +1,701 @@
+
+# google.generativeai.protos.GenerateAnswerRequest.AnswerStyle
+
+
+
+
+
+
+
+Style for grounded answers.
+
+
+google.generativeai.protos.GenerateAnswerRequest.AnswerStyle(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`ANSWER_STYLE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Unspecified answer style.
+
+ |
+
+
+
+`ABSTRACTIVE`
+
+ |
+
+
+`1`
+
+Succinct but abstract style.
+
+ |
+
+
+
+`EXTRACTIVE`
+
+ |
+
+
+`2`
+
+Very brief and extractive style.
+
+ |
+
+
+
+`VERBOSE`
+
+ |
+
+
+`3`
+
+Verbose style including extra details. The
+response may be formatted as a sentence,
+paragraph, multiple paragraphs, or bullet
+points, etc.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+ABSTRACTIVE
+
+ |
+
+
+``
+
+ |
+
+
+
+ANSWER_STYLE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+EXTRACTIVE
+
+ |
+
+
+``
+
+ |
+
+
+
+VERBOSE
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
new file mode 100644
index 000000000..9e9f57fb6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse.md
@@ -0,0 +1,108 @@
+
+# google.generativeai.protos.GenerateAnswerResponse
+
+
+
+
+
+
+
+Response from the model for a grounded answer.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`answer`
+
+ |
+
+
+`google.ai.generativelanguage.Candidate`
+
+Candidate answer from the model.
+
+Note: The model *always* attempts to provide a grounded
+answer, even when the answer is unlikely to be answerable
+from the given passages. In that case, a low-quality or
+ungrounded answer may be provided, along with a low
+``answerable_probability``.
+
+ |
+
+
+
+`answerable_probability`
+
+ |
+
+
+`float`
+
+Output only. The model's estimate of the probability that
+its answer is correct and grounded in the input passages.
+
+A low ``answerable_probability`` indicates that the answer
+might not be grounded in the sources.
+
+When ``answerable_probability`` is low, you may want to:
+
+- Display a message to the effect of "We couldn’t answer
+ that question" to the user.
+- Fall back to a general-purpose LLM that answers the
+ question from world knowledge. The threshold and nature
+ of such fallbacks will depend on individual use cases.
+ ``0.5`` is a good starting threshold.
+
+
+ |
+
+
+
+`input_feedback`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback`
+
+Output only. Feedback related to the input data used to
+answer the question, as opposed to the model-generated
+response to the question.
+
+The input data can be one or more of the following:
+
+- Question specified by the last entry in
+ ``GenerateAnswerRequest.content``
+- Conversation history specified by the other entries in
+ ``GenerateAnswerRequest.content``
+- Grounding sources
+ (GenerateAnswerRequest.semantic_retriever or
+ GenerateAnswerRequest.inline_passages )
+
+
+ |
+
+
+
+
+
+## Child Classes
+[`class InputFeedback`](../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md)
+
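+A hedged sketch of the fallback suggested above for a low `answerable_probability` (assumes `response` is a `GenerateAnswerResponse` obtained elsewhere; `0.5` is the suggested starting threshold):
+
+```
+if response.answerable_probability < 0.5:
+    print("We couldn't answer that question from the provided passages.")
+else:
+    print(response.answer.content.parts[0].text)
+```
+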
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
new file mode 100644
index 000000000..3b51e3b05
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback.md
@@ -0,0 +1,66 @@
+
+# google.generativeai.protos.GenerateAnswerResponse.InputFeedback
+
+
+
+
+
+
+
+Feedback related to the input data used to answer the question, as opposed to the model-generated response to the question.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`block_reason`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateAnswerResponse.InputFeedback.BlockReason`
+
+Optional. If set, the input was blocked and
+no candidates are returned. Rephrase the input.
+
+
+ |
+
+
+
+`safety_ratings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for safety of the input.
+There is at most one rating per category.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockReason`](../../../../google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
new file mode 100644
index 000000000..fd5b36814
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateAnswerResponse/InputFeedback/BlockReason.md
@@ -0,0 +1,676 @@
+
+# google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason
+
+
+
+
+
+
+
+Specifies the reason why the input was blocked.
+
+
+google.generativeai.protos.GenerateAnswerResponse.InputFeedback.BlockReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`BLOCK_REASON_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Default value. This value is unused.
+
+ |
+
+
+
+`SAFETY`
+
+ |
+
+
+`1`
+
+Input was blocked due to safety reasons. Inspect
+``safety_ratings`` to understand which safety category
+blocked it.
+
+ |
+
+
+
+`OTHER`
+
+ |
+
+
+`2`
+
+Input was blocked due to other reasons.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+BLOCK_REASON_UNSPECIFIED
+
+ |
+
+
+`<BlockReason.BLOCK_REASON_UNSPECIFIED: 0>`
+
+ |
+
+
+
+OTHER
+
+ |
+
+
+`<BlockReason.OTHER: 2>`
+
+ |
+
+
+
+SAFETY
+
+ |
+
+
+`<BlockReason.SAFETY: 1>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentRequest.md b/docs/api/google/generativeai/protos/GenerateContentRequest.md
new file mode 100644
index 000000000..0bab8ea0c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentRequest.md
@@ -0,0 +1,192 @@
+
+# google.generativeai.protos.GenerateContentRequest
+
+
+
+
+
+
+
+Request to generate a completion from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Model`` to use for generating
+the completion.
+
+Format: ``name=models/{model}``.
+
+ |
+
+
+
+`system_instruction`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Optional. Developer-set system instruction(s).
+Currently, text only.
+
+
+ |
+
+
+
+`contents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Content]`
+
+Required. The content of the current conversation with the
+model.
+
+For single-turn queries, this is a single instance. For
+multi-turn queries like chat, this is a repeated field
+that contains the conversation history and the latest
+request.
+
+ |
+
+
+
+`tools`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Tool]`
+
+Optional. A list of ``Tools`` the ``Model`` may use to
+generate the next response.
+
+A ``Tool`` is a piece of code that enables the system to
+interact with external systems to perform an action, or set
+of actions, outside the knowledge and scope of the
+``Model``. Supported ``Tool``\ s are ``Function`` and
+``code_execution``. Refer to the Function calling and Code
+execution guides to learn more.
+
+ |
+
+
+
+`tool_config`
+
+ |
+
+
+`google.ai.generativelanguage.ToolConfig`
+
+Optional. Tool configuration for any ``Tool`` specified in
+the request. Refer to the Function calling guide for a
+usage example.
+
+ |
+
+
+
+`safety_settings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+This will be enforced on the
+GenerateContentRequest.contents and
+GenerateContentResponse.candidates. There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any contents and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_HATE_SPEECH,
+HARM_CATEGORY_SEXUALLY_EXPLICIT,
+HARM_CATEGORY_DANGEROUS_CONTENT, and HARM_CATEGORY_HARASSMENT
+are supported. Refer to the safety settings guide for
+detailed information on available settings, and to the
+Safety guidance to learn how to incorporate safety
+considerations in your AI applications.
+
+ |
+
+
+
+`generation_config`
+
+ |
+
+
+`google.ai.generativelanguage.GenerationConfig`
+
+Optional. Configuration options for model
+generation and outputs.
+
+
+ |
+
+
+
+`cached_content`
+
+ |
+
+
+`str`
+
+Optional. The name of the cached content to use as context
+to serve the prediction. Format:
+``cachedContents/{cachedContent}``
+
+
+ |
+
+
+
+
+
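+These request protos are exposed via ``google.generativeai.protos``. A
+minimal sketch of building one by hand (the model name and prompt text
+here are placeholders; in typical use ``GenerativeModel.generate_content``
+assembles this message for you):
+
+```
+from google.generativeai import protos
+
+# Build the request proto directly; field names mirror the table above.
+request = protos.GenerateContentRequest(
+    model="models/gemini-1.5-flash",  # placeholder model name
+    contents=[
+        protos.Content(parts=[protos.Part(text="Explain RSA in one line.")]),
+    ],
+    generation_config=protos.GenerationConfig(temperature=0.4),
+)
+```
+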
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse.md b/docs/api/google/generativeai/protos/GenerateContentResponse.md
new file mode 100644
index 000000000..a19fa46cc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse.md
@@ -0,0 +1,88 @@
+
+# google.generativeai.protos.GenerateContentResponse
+
+
+
+
+
+
+
+Response from the model supporting multiple candidate responses.
+
+
+
+Safety ratings and content filtering are reported both for the prompt in
+GenerateContentResponse.prompt_feedback and for each candidate
+in ``finish_reason`` and in ``safety_ratings``. The API:
+
+- Returns either all requested candidates or none of them
+- Returns no candidates at all only if there was something wrong
+ with the prompt (check ``prompt_feedback``)
+- Reports feedback on each candidate in ``finish_reason`` and
+ ``safety_ratings``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Candidate]`
+
+Candidate responses from the model.
+
+ |
+
+
+
+`prompt_feedback`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback`
+
+Returns the prompt's feedback related to the
+content filters.
+
+ |
+
+
+
+`usage_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateContentResponse.UsageMetadata`
+
+Output only. Metadata on the generation
+request's token usage.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class PromptFeedback`](../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback.md)
+
+[`class UsageMetadata`](../../../google/generativeai/protos/GenerateContentResponse/UsageMetadata.md)
+
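+A hedged sketch of inspecting this response through the high-level client
+(the model name is a placeholder, and an API key is assumed to be
+configured via ``genai.configure``):
+
+```
+import google.generativeai as genai
+
+model = genai.GenerativeModel("gemini-1.5-flash")  # placeholder model name
+response = model.generate_content("Write a haiku about rain.")
+
+# Per the contract above: no candidates at all means the prompt was blocked.
+if not response.candidates:
+    print("Prompt blocked:", response.prompt_feedback.block_reason)
+else:
+    for candidate in response.candidates:
+        print(candidate.finish_reason, candidate.content.parts[0].text)
+```
+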
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
new file mode 100644
index 000000000..bdf993a82
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.GenerateContentResponse.PromptFeedback
+
+
+
+
+
+
+
+A set of feedback metadata for the prompt specified in ``GenerateContentRequest.content``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`block_reason`
+
+ |
+
+
+`google.ai.generativelanguage.GenerateContentResponse.PromptFeedback.BlockReason`
+
+Optional. If set, the prompt was blocked and
+no candidates are returned. Rephrase the prompt.
+
+ |
+
+
+
+`safety_ratings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for safety of the prompt.
+There is at most one rating per category.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class BlockReason`](../../../../google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md)
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
new file mode 100644
index 000000000..df7bb344f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/PromptFeedback/BlockReason.md
@@ -0,0 +1,725 @@
+
+# google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason
+
+
+
+
+
+
+
+Specifies the reason why the prompt was blocked.
+
+
+google.generativeai.protos.GenerateContentResponse.PromptFeedback.BlockReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`BLOCK_REASON_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Default value. This value is unused.
+
+ |
+
+
+
+`SAFETY`
+
+ |
+
+
+`1`
+
+Prompt was blocked due to safety reasons. Inspect
+``safety_ratings`` to understand which safety category
+blocked it.
+
+ |
+
+
+
+`OTHER`
+
+ |
+
+
+`2`
+
+Prompt was blocked due to unknown reasons.
+
+ |
+
+
+
+`BLOCKLIST`
+
+ |
+
+
+`3`
+
+Prompt was blocked due to terms included in the
+terminology blocklist.
+
+ |
+
+
+
+`PROHIBITED_CONTENT`
+
+ |
+
+
+`4`
+
+Prompt was blocked due to prohibited content.
+
+ |
+
+
+
+
+
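+Because the enum behaves like an ``int``, handlers can compare members
+directly. A small sketch (``feedback`` is assumed to be a
+``PromptFeedback`` message taken from a response):
+
+```
+from google.generativeai import protos
+
+BlockReason = protos.GenerateContentResponse.PromptFeedback.BlockReason
+
+def explain_block(feedback) -> str:
+    # Map each enum member to a short, human-readable note.
+    if feedback.block_reason == BlockReason.SAFETY:
+        return "Blocked for safety; inspect feedback.safety_ratings."
+    if feedback.block_reason == BlockReason.BLOCKLIST:
+        return "Blocked by the terminology blocklist."
+    if feedback.block_reason == BlockReason.PROHIBITED_CONTENT:
+        return "Blocked for prohibited content."
+    return "Blocked for other or unspecified reasons."
+```
+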
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+    bytes, byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+BLOCKLIST
+
+ |
+
+
+`<BlockReason.BLOCKLIST: 3>`
+
+ |
+
+
+
+BLOCK_REASON_UNSPECIFIED
+
+ |
+
+
+`<BlockReason.BLOCK_REASON_UNSPECIFIED: 0>`
+
+ |
+
+
+
+OTHER
+
+ |
+
+
+`<BlockReason.OTHER: 2>`
+
+ |
+
+
+
+PROHIBITED_CONTENT
+
+ |
+
+
+`<BlockReason.PROHIBITED_CONTENT: 4>`
+
+ |
+
+
+
+SAFETY
+
+ |
+
+
+`<BlockReason.SAFETY: 1>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
new file mode 100644
index 000000000..1aee78acc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateContentResponse/UsageMetadata.md
@@ -0,0 +1,90 @@
+
+# google.generativeai.protos.GenerateContentResponse.UsageMetadata
+
+
+
+
+
+
+
+Metadata on the generation request's token usage.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`prompt_token_count`
+
+ |
+
+
+`int`
+
+Number of tokens in the prompt. When ``cached_content`` is
+set, this is still the total effective prompt size, meaning
+it includes the number of tokens in the cached content.
+
+ |
+
+
+
+`cached_content_token_count`
+
+ |
+
+
+`int`
+
+Number of tokens in the cached part of the
+prompt (the cached content)
+
+ |
+
+
+
+`candidates_token_count`
+
+ |
+
+
+`int`
+
+Total number of tokens across all the
+generated response candidates.
+
+ |
+
+
+
+`total_token_count`
+
+ |
+
+
+`int`
+
+Total token count for the generation request
+(prompt + response candidates).
+
+ |
+
+
+
+
+
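+As a sketch, these counts can be read off a response from the high-level
+client (the model name is a placeholder):
+
+```
+import google.generativeai as genai
+
+model = genai.GenerativeModel("gemini-1.5-flash")  # placeholder model name
+response = model.generate_content("Summarize the water cycle.")
+
+usage = response.usage_metadata
+# total_token_count covers the prompt plus all response candidates.
+print(usage.prompt_token_count, usage.candidates_token_count,
+      usage.total_token_count)
+```
+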
diff --git a/docs/api/google/generativeai/protos/GenerateMessageRequest.md b/docs/api/google/generativeai/protos/GenerateMessageRequest.md
new file mode 100644
index 000000000..ed737707f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateMessageRequest.md
@@ -0,0 +1,142 @@
+
+# google.generativeai.protos.GenerateMessageRequest
+
+
+
+
+
+
+
+Request to generate a message response from the model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the model to use.
+
+Format: ``name=models/{model}``.
+
+ |
+
+
+
+`prompt`
+
+ |
+
+
+`google.ai.generativelanguage.MessagePrompt`
+
+Required. The structured textual input given
+to the model as a prompt.
+Given a prompt, the model will return what it
+predicts is the next message in the discussion.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range over ``[0.0,1.0]``, inclusive. A value
+closer to ``1.0`` will produce responses that are more
+varied, while a value closer to ``0.0`` will typically
+result in less surprising responses from the model.
+
+
+ |
+
+
+
+`candidate_count`
+
+ |
+
+
+`int`
+
+Optional. The number of generated response messages to
+return.
+
+This value must be between ``[1, 8]``, inclusive. If unset,
+this will default to ``1``.
+
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``.
+
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateMessageResponse.md b/docs/api/google/generativeai/protos/GenerateMessageResponse.md
new file mode 100644
index 000000000..52ed9e51d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateMessageResponse.md
@@ -0,0 +1,81 @@
+
+# google.generativeai.protos.GenerateMessageResponse
+
+
+
+
+
+
+
+The response from the model.
+
+
+
+This includes candidate messages and
+conversation history in the form of chronologically-ordered
+messages.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+Candidate response messages from the model.
+
+ |
+
+
+
+`messages`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+The conversation history used by the model.
+
+ |
+
+
+
+`filters`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.ContentFilter]`
+
+A set of content filtering metadata for the prompt and
+response text.
+
+This indicates which ``SafetyCategory``\ (s) blocked a
+candidate from this response, the lowest ``HarmProbability``
+that triggered a block, and the HarmThreshold setting for
+that category.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerateTextRequest.md b/docs/api/google/generativeai/protos/GenerateTextRequest.md
new file mode 100644
index 000000000..d6aed4add
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateTextRequest.md
@@ -0,0 +1,219 @@
+
+# google.generativeai.protos.GenerateTextRequest
+
+
+
+
+
+
+
+Request to generate a text completion response from the model.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Model`` or ``TunedModel`` to use
+for generating the completion. Examples:
+``models/text-bison-001``, ``tunedModels/sentence-translator-u3b7m``
+
+ |
+
+
+
+`prompt`
+
+ |
+
+
+`google.ai.generativelanguage.TextPrompt`
+
+Required. The free-form input text given to
+the model as a prompt.
+Given a prompt, the model will generate a
+TextCompletion response it predicts as the
+completion of the input text.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Optional. Controls the randomness of the output. Note: The
+default value varies by model, see the Model.temperature
+attribute of the ``Model`` returned from the ``getModel``
+function.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model.
+
+
+ |
+
+
+
+`candidate_count`
+
+ |
+
+
+`int`
+
+Optional. Number of generated responses to return.
+
+This value must be between [1, 8], inclusive. If unset, this
+will default to 1.
+
+
+ |
+
+
+
+`max_output_tokens`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to include in a
+candidate.
+
+If unset, this will default to output_token_limit specified
+in the ``Model`` specification.
+
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits the number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model, see the
+Model.top_p attribute of the ``Model`` returned from the
+``getModel`` function.
+
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. Defaults to 40.
+
+Note: The default value varies by model, see the
+Model.top_k attribute of the ``Model`` returned from the
+``getModel`` function.
+
+
+ |
+
+
+
+`safety_settings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetySetting]`
+
+Optional. A list of unique ``SafetySetting`` instances for
+blocking unsafe content.
+
+These will be enforced on the GenerateTextRequest.prompt
+and GenerateTextResponse.candidates. There should not be
+more than one setting for each ``SafetyCategory`` type. The
+API will block any prompts and responses that fail to meet
+the thresholds set by these settings. This list overrides
+the default settings for each ``SafetyCategory`` specified
+in the safety_settings. If there is no ``SafetySetting`` for
+a given ``SafetyCategory`` provided in the list, the API
+will use the default safety setting for that category. Harm
+categories HARM_CATEGORY_DEROGATORY, HARM_CATEGORY_TOXICITY,
+HARM_CATEGORY_VIOLENCE, HARM_CATEGORY_SEXUAL,
+HARM_CATEGORY_MEDICAL, HARM_CATEGORY_DANGEROUS are supported
+in text service.
+
+ |
+
+
+
+`stop_sequences`
+
+ |
+
+
+`MutableSequence[str]`
+
+The set of character sequences (up to 5) that
+will stop output generation. If specified, the
+API will stop at the first appearance of a stop
+sequence. The stop sequence will not be included
+as part of the response.
+
+ |
+
+
+
+
+
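+A minimal sketch of this legacy (PaLM-era) request built from the protos
+(the model name and prompt text are placeholders):
+
+```
+from google.generativeai import protos
+
+request = protos.GenerateTextRequest(
+    model="models/text-bison-001",  # legacy text model
+    prompt=protos.TextPrompt(text="Write a limerick about otters."),
+    temperature=0.5,
+    candidate_count=2,
+)
+```
+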
diff --git a/docs/api/google/generativeai/protos/GenerateTextResponse.md b/docs/api/google/generativeai/protos/GenerateTextResponse.md
new file mode 100644
index 000000000..a7b07a833
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerateTextResponse.md
@@ -0,0 +1,84 @@
+
+# google.generativeai.protos.GenerateTextResponse
+
+
+
+
+
+
+
+The response from the model, including candidate completions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.TextCompletion]`
+
+Candidate responses from the model.
+
+ |
+
+
+
+`filters`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.ContentFilter]`
+
+A set of content filtering metadata for the prompt and
+response text.
+
+This indicates which ``SafetyCategory``\ (s) blocked a
+candidate from this response, the lowest ``HarmProbability``
+that triggered a block, and the HarmThreshold setting for
+that category. This indicates the smallest change to the
+``SafetySettings`` that would be necessary to unblock at
+least 1 response.
+
+The blocking is configured by the ``SafetySettings`` in the
+request (or the default ``SafetySettings`` of the API).
+
+ |
+
+
+
+`safety_feedback`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetyFeedback]`
+
+Returns any safety feedback related to
+content filtering.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GenerationConfig.md b/docs/api/google/generativeai/protos/GenerationConfig.md
new file mode 100644
index 000000000..5a068c968
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GenerationConfig.md
@@ -0,0 +1,297 @@
+
+# google.generativeai.protos.GenerationConfig
+
+
+
+
+
+
+
+Configuration options for model generation and outputs.
+
+
+Not all parameters are configurable for every model.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidate_count`
+
+ |
+
+
+`int`
+
+Optional. Number of generated responses to
+return.
+Currently, this value can only be set to 1. If
+unset, this will default to 1.
+
+
+ |
+
+
+
+`stop_sequences`
+
+ |
+
+
+`MutableSequence[str]`
+
+Optional. The set of character sequences (up to 5) that will
+stop output generation. If specified, the API will stop at
+the first appearance of a ``stop_sequence``. The stop
+sequence will not be included as part of the response.
+
+ |
+
+
+
+`max_output_tokens`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to include in a
+response candidate.
+
+Note: The default value varies by model, see the
+Model.output_token_limit attribute of the ``Model``
+returned from the ``getModel`` function.
+
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Note: The default value varies by model, see the
+Model.temperature attribute of the ``Model`` returned
+from the ``getModel`` function.
+
+Values can range from [0.0, 2.0].
+
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+`float`
+
+Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and Top-p (nucleus) sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while Nucleus sampling limits the number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by ``Model`` and is specified
+by the Model.top_p attribute returned from the
+``getModel`` function. An empty ``top_k`` attribute
+indicates that the model doesn't apply top-k sampling and
+doesn't allow setting ``top_k`` on requests.
+
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+Gemini models use Top-p (nucleus) sampling or a combination
+of Top-k and nucleus sampling. Top-k sampling considers the
+set of ``top_k`` most probable tokens. Models running with
+nucleus sampling don't allow top_k setting.
+
+Note: The default value varies by ``Model`` and is specified
+by the Model.top_k attribute returned from the
+``getModel`` function. An empty ``top_k`` attribute
+indicates that the model doesn't apply top-k sampling and
+doesn't allow setting ``top_k`` on requests.
+
+
+ |
+
+
+
+`response_mime_type`
+
+ |
+
+
+`str`
+
+Optional. MIME type of the generated candidate text.
+Supported MIME types are: ``text/plain``: (default) Text
+output. ``application/json``: JSON response in the response
+candidates. ``text/x.enum``: ENUM as a string response in
+the response candidates. Refer to the docs for a list of
+all supported text MIME types.
+
+ |
+
+
+
+`response_schema`
+
+ |
+
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Output schema of the generated candidate text.
+Schemas must be a subset of the OpenAPI schema and
+can be objects, primitives or arrays.
+
+If set, a compatible ``response_mime_type`` must also be
+set. Compatible MIME types: ``application/json``: Schema for
+JSON response. Refer to the JSON text generation guide
+for more details.
+
+ |
+
+
+
+`presence_penalty`
+
+ |
+
+
+`float`
+
+Optional. Presence penalty applied to the next token's
+logprobs if the token has already been seen in the response.
+
+This penalty is binary on/off and not dependent on the
+number of times the token is used (after the first). Use
+[frequency_penalty][google.ai.generativelanguage.v1beta.GenerationConfig.frequency_penalty]
+for a penalty that increases with each use.
+
+A positive penalty will discourage the use of tokens that
+have already been used in the response, increasing the
+vocabulary.
+
+A negative penalty will encourage the use of tokens that
+have already been used in the response, decreasing the
+vocabulary.
+
+
+ |
+
+
+
+`frequency_penalty`
+
+ |
+
+
+`float`
+
+Optional. Frequency penalty applied to the next token's
+logprobs, multiplied by the number of times each token has
+been seen in the response so far.
+
+A positive penalty will discourage the use of tokens that
+have already been used, proportional to the number of times
+the token has been used: The more a token is used, the more
+difficult it is for the model to use that token again,
+increasing the vocabulary of responses.
+
+Caution: A *negative* penalty will encourage the model to
+reuse tokens proportional to the number of times the token
+has been used. Small negative values will reduce the
+vocabulary of a response. Larger negative values will cause
+the model to start repeating a common token until it hits
+the
+[max_output_tokens][google.ai.generativelanguage.v1beta.GenerationConfig.max_output_tokens]
+limit: "...the the the the the...".
+
+
+ |
+
+
+
+`response_logprobs`
+
+ |
+
+
+`bool`
+
+Optional. If true, export the logprobs
+results in the response.
+
+
+ |
+
+
+
+`logprobs`
+
+ |
+
+
+`int`
+
+Optional. Only valid if
+[response_logprobs=True][google.ai.generativelanguage.v1beta.GenerationConfig.response_logprobs].
+This sets the number of top logprobs to return at each
+decoding step in the
+[Candidate.logprobs_result][google.ai.generativelanguage.v1beta.Candidate.logprobs_result].
+
+This field is a member of `oneof`_ ``_logprobs``.
+
+ |
+
+
+
+
+
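+These fields map onto the keyword arguments of the SDK's
+``GenerationConfig`` helper. A sketch with illustrative values (the model
+name is a placeholder):
+
+```
+import google.generativeai as genai
+
+config = genai.GenerationConfig(
+    temperature=0.7,               # in [0.0, 2.0]
+    max_output_tokens=256,
+    stop_sequences=["\n\n"],       # up to 5 stop sequences
+    response_mime_type="application/json",
+)
+model = genai.GenerativeModel("gemini-1.5-flash", generation_config=config)
+```
+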
diff --git a/docs/api/google/generativeai/protos/GetCachedContentRequest.md b/docs/api/google/generativeai/protos/GetCachedContentRequest.md
new file mode 100644
index 000000000..90b218160
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetCachedContentRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GetCachedContentRequest
+
+
+
+
+
+
+
+Request to read CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name referring to the content cache
+entry. Format: ``cachedContents/{id}``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetChunkRequest.md b/docs/api/google/generativeai/protos/GetChunkRequest.md
new file mode 100644
index 000000000..e6af445dc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetChunkRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GetChunkRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Chunk`` to retrieve. Example:
+``corpora/my-corpus-123/documents/the-doc-abc/chunks/some-chunk``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetCorpusRequest.md b/docs/api/google/generativeai/protos/GetCorpusRequest.md
new file mode 100644
index 000000000..bfe9f9dff
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetCorpusRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GetCorpusRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Corpus``. Example:
+``corpora/my-corpus-123``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetDocumentRequest.md b/docs/api/google/generativeai/protos/GetDocumentRequest.md
new file mode 100644
index 000000000..8d4b83e4e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetDocumentRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GetDocumentRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Document`` to retrieve. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetFileRequest.md b/docs/api/google/generativeai/protos/GetFileRequest.md
new file mode 100644
index 000000000..dba220926
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetFileRequest.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GetFileRequest
+
+
+
+
+
+
+
+Request for ``GetFile``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``File`` to get. Example:
+``files/abc-123``
+
+ |
+
+
+
+
+
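+In the SDK this request is issued via ``genai.get_file``. A minimal sketch
+(the file name is a placeholder):
+
+```
+import google.generativeai as genai
+
+# Look up a previously uploaded file by its resource name.
+file = genai.get_file("files/abc-123")  # placeholder file name
+print(file.display_name, file.uri)
+```
+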
diff --git a/docs/api/google/generativeai/protos/GetModelRequest.md b/docs/api/google/generativeai/protos/GetModelRequest.md
new file mode 100644
index 000000000..455f32b64
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetModelRequest.md
@@ -0,0 +1,51 @@
+
+# google.generativeai.protos.GetModelRequest
+
+
+
+
+
+
+
+Request for getting information about a specific Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the model.
+
+This name should match a model name returned by the
+``ListModels`` method.
+
+Format: ``models/{model}``
+
+ |
+
+
+
+
+
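+In the SDK this request is issued via ``genai.get_model``. A minimal
+sketch (the model name is a placeholder):
+
+```
+import google.generativeai as genai
+
+# Fetch metadata for a base model by its resource name.
+info = genai.get_model("models/gemini-1.5-flash")  # placeholder model name
+print(info.input_token_limit, info.output_token_limit)
+```
+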
diff --git a/docs/api/google/generativeai/protos/GetPermissionRequest.md b/docs/api/google/generativeai/protos/GetPermissionRequest.md
new file mode 100644
index 000000000..0b26ec3bb
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetPermissionRequest.md
@@ -0,0 +1,50 @@
+
+# google.generativeai.protos.GetPermissionRequest
+
+
+
+
+
+
+
+Request for getting information about a specific ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the permission.
+
+Formats:
+``tunedModels/{tuned_model}/permissions/{permission}``
+``corpora/{corpus}/permissions/{permission}``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GetTunedModelRequest.md b/docs/api/google/generativeai/protos/GetTunedModelRequest.md
new file mode 100644
index 000000000..725980887
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GetTunedModelRequest.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.GetTunedModelRequest
+
+
+
+
+
+
+
+Request for getting information about a specific Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the model.
+
+Format: ``tunedModels/my-model-id``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md b/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md
new file mode 100644
index 000000000..9d795e8ed
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GoogleSearchRetrieval.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.GoogleSearchRetrieval
+
+
+
+
+
+
+
+Tool to retrieve public web data for grounding, powered by Google.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`dynamic_retrieval_config`
+
+ |
+
+
+`google.ai.generativelanguage.DynamicRetrievalConfig`
+
+Specifies the dynamic retrieval configuration
+for the given source.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingAttribution.md b/docs/api/google/generativeai/protos/GroundingAttribution.md
new file mode 100644
index 000000000..f07e399b3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingAttribution.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.GroundingAttribution
+
+
+
+
+
+
+
+Attribution for a source that contributed to an answer.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`source_id`
+
+ |
+
+
+`google.ai.generativelanguage.AttributionSourceId`
+
+Output only. Identifier for the source
+contributing to this attribution.
+
+ |
+
+
+
+`content`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Grounding source content that makes up this
+attribution.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingChunk.md b/docs/api/google/generativeai/protos/GroundingChunk.md
new file mode 100644
index 000000000..adb637a71
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingChunk.md
@@ -0,0 +1,51 @@
+
+# google.generativeai.protos.GroundingChunk
+
+
+
+
+
+
+
+Grounding chunk.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`web`
+
+ |
+
+
+`google.ai.generativelanguage.GroundingChunk.Web`
+
+Grounding chunk from the web.
+
+This field is a member of `oneof`_ ``chunk_type``.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Web`](../../../google/generativeai/protos/GroundingChunk/Web.md)
+
diff --git a/docs/api/google/generativeai/protos/GroundingChunk/Web.md b/docs/api/google/generativeai/protos/GroundingChunk/Web.md
new file mode 100644
index 000000000..f9757e0c8
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingChunk/Web.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.GroundingChunk.Web
+
+
+
+
+
+
+
+Chunk from the web.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`uri`
+
+ |
+
+
+`str`
+
+URI reference of the chunk.
+
+
+ |
+
+
+
+`title`
+
+ |
+
+
+`str`
+
+Title of the chunk.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingMetadata.md b/docs/api/google/generativeai/protos/GroundingMetadata.md
new file mode 100644
index 000000000..54811ea73
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingMetadata.md
@@ -0,0 +1,90 @@
+
+# google.generativeai.protos.GroundingMetadata
+
+
+
+
+
+
+
+Metadata returned to client when grounding is enabled.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`search_entry_point`
+
+ |
+
+
+`google.ai.generativelanguage.SearchEntryPoint`
+
+Optional. Google search entry point for
+follow-up web searches.
+
+
+ |
+
+
+
+`grounding_chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.GroundingChunk]`
+
+List of supporting references retrieved from
+the specified grounding source.
+
+ |
+
+
+
+`grounding_supports`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.GroundingSupport]`
+
+List of grounding supports.
+
+ |
+
+
+
+`retrieval_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.RetrievalMetadata`
+
+Metadata related to retrieval in the
+grounding flow.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingPassage.md b/docs/api/google/generativeai/protos/GroundingPassage.md
new file mode 100644
index 000000000..9b0e45d17
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingPassage.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.GroundingPassage
+
+
+
+
+
+
+
+Passage included inline with a grounding configuration.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`id`
+
+ |
+
+
+`str`
+
+Identifier for the passage for attributing
+this passage in grounded answers.
+
+ |
+
+
+
+`content`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Content of the passage.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingPassages.md b/docs/api/google/generativeai/protos/GroundingPassages.md
new file mode 100644
index 000000000..a3b353808
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingPassages.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.GroundingPassages
+
+
+
+
+
+
+
+A repeated list of passages.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`passages`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.GroundingPassage]`
+
+List of passages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/GroundingSupport.md b/docs/api/google/generativeai/protos/GroundingSupport.md
new file mode 100644
index 000000000..d3ddd823c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/GroundingSupport.md
@@ -0,0 +1,80 @@
+
+# google.generativeai.protos.GroundingSupport
+
+
+
+
+
+
+
+Grounding support.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`segment`
+
+ |
+
+
+`google.ai.generativelanguage.Segment`
+
+Segment of the content this support belongs
+to.
+
+
+ |
+
+
+
+`grounding_chunk_indices`
+
+ |
+
+
+`MutableSequence[int]`
+
+A list of indices (into 'grounding_chunk') specifying the
+citations associated with the claim. For instance [1,3,4]
+means that grounding_chunk[1], grounding_chunk[3],
+grounding_chunk[4] are the retrieved content attributed to
+the claim.
+
+ |
+
+
+
+`confidence_scores`
+
+ |
+
+
+`MutableSequence[float]`
+
+Confidence score of the support references. Ranges from 0 to
+1. 1 is the most confident. This list must have the same
+size as the grounding_chunk_indices.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/HarmCategory.md b/docs/api/google/generativeai/protos/HarmCategory.md
new file mode 100644
index 000000000..cd7a72b9d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/HarmCategory.md
@@ -0,0 +1,897 @@
+
+# google.generativeai.protos.HarmCategory
+
+
+
+
+
+
+
+The category of a rating.
+
+
+google.generativeai.protos.HarmCategory(
+ *args, **kwds
+)
+
+
+
+
+
+
+These categories cover various kinds of harms that developers
+may wish to adjust.
+
+
+
+
+Values |
+
+
+
+
+`HARM_CATEGORY_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Category is unspecified.
+
+ |
+
+
+
+`HARM_CATEGORY_DEROGATORY`
+
+ |
+
+
+`1`
+
+**PaLM** - Negative or harmful comments targeting identity
+and/or protected attribute.
+
+ |
+
+
+
+`HARM_CATEGORY_TOXICITY`
+
+ |
+
+
+`2`
+
+**PaLM** - Content that is rude, disrespectful, or profane.
+
+ |
+
+
+
+`HARM_CATEGORY_VIOLENCE`
+
+ |
+
+
+`3`
+
+**PaLM** - Describes scenarios depicting violence against an
+individual or group, or general descriptions of gore.
+
+ |
+
+
+
+`HARM_CATEGORY_SEXUAL`
+
+ |
+
+
+`4`
+
+**PaLM** - Contains references to sexual acts or other lewd
+content.
+
+ |
+
+
+
+`HARM_CATEGORY_MEDICAL`
+
+ |
+
+
+`5`
+
+**PaLM** - Promotes unchecked medical advice.
+
+ |
+
+
+
+`HARM_CATEGORY_DANGEROUS`
+
+ |
+
+
+`6`
+
+**PaLM** - Dangerous content that promotes, facilitates, or
+encourages harmful acts.
+
+ |
+
+
+
+`HARM_CATEGORY_HARASSMENT`
+
+ |
+
+
+`7`
+
+**Gemini** - Harassment content.
+
+ |
+
+
+
+`HARM_CATEGORY_HATE_SPEECH`
+
+ |
+
+
+`8`
+
+**Gemini** - Hate speech and content.
+
+ |
+
+
+
+`HARM_CATEGORY_SEXUALLY_EXPLICIT`
+
+ |
+
+
+`9`
+
+**Gemini** - Sexually explicit content.
+
+ |
+
+
+
+`HARM_CATEGORY_DANGEROUS_CONTENT`
+
+ |
+
+
+`10`
+
+**Gemini** - Dangerous content.
+
+ |
+
+
+
+`HARM_CATEGORY_CIVIC_INTEGRITY`
+
+ |
+
+
+`11`
+
+**Gemini** - Content that may be used to harm civic
+integrity.
+
+ |
+
+
+
+
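+The Gemini categories above are the ones accepted in safety settings. A
+hedged sketch of adjusting one category's block threshold (the model name
+is a placeholder):
+
+```
+import google.generativeai as genai
+from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+model = genai.GenerativeModel(
+    "gemini-1.5-flash",  # placeholder model name
+    safety_settings={
+        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    },
+)
+```
+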
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+    bytes, byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+HARM_CATEGORY_CIVIC_INTEGRITY
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY: 11>`
+
+ |
+
+
+
+HARM_CATEGORY_DANGEROUS
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_DANGEROUS: 6>`
+
+ |
+
+
+
+HARM_CATEGORY_DANGEROUS_CONTENT
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: 10>`
+
+ |
+
+
+
+HARM_CATEGORY_DEROGATORY
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_DEROGATORY: 1>`
+
+ |
+
+
+
+HARM_CATEGORY_HARASSMENT
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_HARASSMENT: 7>`
+
+ |
+
+
+
+HARM_CATEGORY_HATE_SPEECH
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_HATE_SPEECH: 8>`
+
+ |
+
+
+
+HARM_CATEGORY_MEDICAL
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_MEDICAL: 5>`
+
+ |
+
+
+
+HARM_CATEGORY_SEXUAL
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_SEXUAL: 4>`
+
+ |
+
+
+
+HARM_CATEGORY_SEXUALLY_EXPLICIT
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: 9>`
+
+ |
+
+
+
+HARM_CATEGORY_TOXICITY
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_TOXICITY: 2>`
+
+ |
+
+
+
+HARM_CATEGORY_UNSPECIFIED
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_UNSPECIFIED: 0>`
+
+ |
+
+
+
+HARM_CATEGORY_VIOLENCE
+
+ |
+
+
+`<HarmCategory.HARM_CATEGORY_VIOLENCE: 3>`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Hyperparameters.md b/docs/api/google/generativeai/protos/Hyperparameters.md
new file mode 100644
index 000000000..80067de5c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Hyperparameters.md
@@ -0,0 +1,110 @@
+
+# google.generativeai.protos.Hyperparameters
+
+
+
+
+
+
+
+Hyperparameters controlling the tuning process.
+
+
+Read more at
+https://ai.google.dev/docs/model_tuning_guidance
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`learning_rate`
+
+ |
+
+
+`float`
+
+Optional. Immutable. The learning rate
+hyperparameter for tuning. If not set, a default
+of 0.001 or 0.0002 will be calculated based on
+the number of training examples.
+
+This field is a member of `oneof`_ ``learning_rate_option``.
+
+ |
+
+
+
+`learning_rate_multiplier`
+
+ |
+
+
+`float`
+
+Optional. Immutable. The learning rate multiplier is used to
+calculate a final learning_rate based on the default
+(recommended) value. Actual learning rate :=
+learning_rate_multiplier \* default learning rate. The default
+learning rate is dependent on the base model and dataset size.
+If not set, a default of 1.0 will be used.
+
+This field is a member of `oneof`_ ``learning_rate_option``.
+
+ |
+
+
+
+`epoch_count`
+
+ |
+
+
+`int`
+
+Immutable. The number of training epochs. An
+epoch is one pass through the training data. If
+not set, a default of 5 will be used.
+
+
+ |
+
+
+
+`batch_size`
+
+ |
+
+
+`int`
+
+Immutable. The batch size hyperparameter for
+tuning. If not set, a default of 4 or 16 will be
+used based on the number of training examples.
+
+
+ |
+
+
+
+
+
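+These hyperparameters surface as keyword arguments on the SDK's tuning
+entry point. A sketch, assuming a tunable base model and training data
+(both placeholders):
+
+```
+import google.generativeai as genai
+
+operation = genai.create_tuned_model(
+    source_model="models/gemini-1.0-pro-001",  # placeholder base model
+    training_data=[{"text_input": "1", "output": "2"}],  # placeholder data
+    epoch_count=5,        # one pass through the training data per epoch
+    batch_size=4,
+    learning_rate=0.001,  # mutually exclusive with learning_rate_multiplier
+)
+```
+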
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsRequest.md b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
new file mode 100644
index 000000000..72da58e3d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCachedContentsRequest.md
@@ -0,0 +1,70 @@
+
+# google.generativeai.protos.ListCachedContentsRequest
+
+
+
+
+
+
+
+Request to list CachedContents.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of cached
+contents to return. The service may return fewer
+than this value. If unspecified, some default
+(under maximum) number of items will be
+returned. The maximum value is 1000; values
+above 1000 will be coerced to 1000.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListCachedContents`` call. Provide this to retrieve the
+subsequent page.
+
+When paginating, all other parameters provided to
+``ListCachedContents`` must match the call that provided the
+page token.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCachedContentsResponse.md b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
new file mode 100644
index 000000000..a55772386
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCachedContentsResponse.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.ListCachedContentsResponse
+
+
+
+
+
+
+
+Response with CachedContents list.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`cached_contents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CachedContent]`
+
+List of cached contents.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no subsequent
+pages.
+
+ |
+
+
+
+
+
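+The SDK's caching helper follows ``next_page_token`` for you, yielding
+cached contents across pages. A minimal sketch:
+
+```
+from google.generativeai import caching
+
+for cached in caching.CachedContent.list(page_size=50):
+    print(cached.name, cached.model)
+```
+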
diff --git a/docs/api/google/generativeai/protos/ListChunksRequest.md b/docs/api/google/generativeai/protos/ListChunksRequest.md
new file mode 100644
index 000000000..db163b75f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListChunksRequest.md
@@ -0,0 +1,86 @@
+
+# google.generativeai.protos.ListChunksRequest
+
+
+
+
+
+
+
+Request for listing ``Chunk``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Document`` containing
+``Chunk``\ s. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return (per
+page). The service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum size limit is 100 ``Chunk``\ s per page.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListChunks`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListChunks`` must match the call that provided the page
+token.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListChunksResponse.md b/docs/api/google/generativeai/protos/ListChunksResponse.md
new file mode 100644
index 000000000..32c0cfdfc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListChunksResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListChunksResponse
+
+
+
+
+
+
+
+Response from ``ListChunks`` containing a paginated list of ``Chunk``\ s.
+
+
+The ``Chunk``\ s are sorted by ascending
+``chunk.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Chunk]`
+
+The returned ``Chunk``\ s.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCorporaRequest.md b/docs/api/google/generativeai/protos/ListCorporaRequest.md
new file mode 100644
index 000000000..ec0eace8c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCorporaRequest.md
@@ -0,0 +1,71 @@
+
+# google.generativeai.protos.ListCorporaRequest
+
+
+
+
+
+
+
+Request for listing ``Corpora``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Corpora`` to return (per
+page). The service may return fewer ``Corpora``.
+
+If unspecified, at most 10 ``Corpora`` will be returned. The
+maximum size limit is 20 ``Corpora`` per page.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListCorpora`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListCorpora`` must match the call that provided the page
+token.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListCorporaResponse.md b/docs/api/google/generativeai/protos/ListCorporaResponse.md
new file mode 100644
index 000000000..d66a5abbe
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListCorporaResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListCorporaResponse
+
+
+
+
+
+
+
+Response from ``ListCorpora`` containing a paginated list of ``Corpora``.
+
+
+The results are sorted by ascending
+``corpus.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`corpora`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Corpus]`
+
+The returned corpora.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListDocumentsRequest.md b/docs/api/google/generativeai/protos/ListDocumentsRequest.md
new file mode 100644
index 000000000..ae33ad435
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListDocumentsRequest.md
@@ -0,0 +1,85 @@
+
+# google.generativeai.protos.ListDocumentsRequest
+
+
+
+
+
+
+
+Request for listing ``Document``\ s.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Corpus`` containing
+``Document``\ s. Example: ``corpora/my-corpus-123``
+
+ |
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Document``\ s to return
+(per page). The service may return fewer ``Document``\ s.
+
+If unspecified, at most 10 ``Document``\ s will be returned.
+The maximum size limit is 20 ``Document``\ s per page.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListDocuments`` call.
+
+Provide the ``next_page_token`` returned in the response as
+an argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListDocuments`` must match the call that provided the page
+token.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListDocumentsResponse.md b/docs/api/google/generativeai/protos/ListDocumentsResponse.md
new file mode 100644
index 000000000..3c98c4992
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListDocumentsResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListDocumentsResponse
+
+
+
+
+
+
+
+Response from ``ListDocuments`` containing a paginated list of ``Document``\ s.
+
+
+ The ``Document``\ s are sorted by ascending
+``document.create_time``.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`documents`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Document]`
+
+The returned ``Document``\ s.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page. If this field is omitted, there are no more
+pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListFilesRequest.md b/docs/api/google/generativeai/protos/ListFilesRequest.md
new file mode 100644
index 000000000..31874b586
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListFilesRequest.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.ListFilesRequest
+
+
+
+
+
+
+
+Request for ``ListFiles``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. Maximum number of ``File``\ s to return per page.
+If unspecified, defaults to 10. Maximum ``page_size`` is
+100.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token from a previous ``ListFiles`` call.
+
+ |
+
+
+
+
+
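+As a sketch, the high-level SDK wraps this request;
+``genai.list_files()`` pages through uploaded files for you:
+
+```
+import google.generativeai as genai
+
+genai.configure(api_key="YOUR_API_KEY")  # placeholder key
+
+# Yields File objects across pages; page_size caps each request.
+for f in genai.list_files(page_size=100):
+    print(f.name, f.display_name)
+```
+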
diff --git a/docs/api/google/generativeai/protos/ListFilesResponse.md b/docs/api/google/generativeai/protos/ListFilesResponse.md
new file mode 100644
index 000000000..92b6fc1f4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListFilesResponse.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.ListFilesResponse
+
+
+
+
+
+
+
+Response for ``ListFiles``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`files`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.File]`
+
+The list of ``File``\ s.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token that can be sent as a ``page_token`` into a
+subsequent ``ListFiles`` call.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListModelsRequest.md b/docs/api/google/generativeai/protos/ListModelsRequest.md
new file mode 100644
index 000000000..25bcaa195
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListModelsRequest.md
@@ -0,0 +1,70 @@
+
+# google.generativeai.protos.ListModelsRequest
+
+
+
+
+
+
+
+Request for listing all Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+The maximum number of ``Models`` to return (per page).
+
+If unspecified, 50 models will be returned per page. This
+method returns at most 1000 models per page, even if you
+pass a larger page_size.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+A page token, received from a previous ``ListModels`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListModels`` must match the call that provided the page
+token.
+
+ |
+
+
+
+
+
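+A short sketch of the wrapped call: ``genai.list_models()`` yields
+``Model`` objects across pages, handling the ``page_token`` bookkeeping
+transparently:
+
+```
+import google.generativeai as genai
+
+# page_size caps each underlying ListModels call, not the total.
+for model in genai.list_models(page_size=50):
+    if "generateContent" in model.supported_generation_methods:
+        print(model.name)
+```
+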
diff --git a/docs/api/google/generativeai/protos/ListModelsResponse.md b/docs/api/google/generativeai/protos/ListModelsResponse.md
new file mode 100644
index 000000000..c85950641
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListModelsResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListModelsResponse
+
+
+
+
+
+
+
+Response from ``ListModels`` containing a paginated list of Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`models`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Model]`
+
+The returned Models.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListPermissionsRequest.md b/docs/api/google/generativeai/protos/ListPermissionsRequest.md
new file mode 100644
index 000000000..053d9495b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListPermissionsRequest.md
@@ -0,0 +1,86 @@
+
+# google.generativeai.protos.ListPermissionsRequest
+
+
+
+
+
+
+
+Request for listing permissions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+`str`
+
+Required. The parent resource of the permissions. Formats:
+``tunedModels/{tuned_model}`` ``corpora/{corpus}``
+
+ |
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Permission``\ s to return
+(per page). The service may return fewer permissions.
+
+If unspecified, at most 10 permissions will be returned.
+This method returns at most 1000 permissions per page, even
+if you pass a larger page_size.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListPermissions`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListPermissions`` must match the call that provided the
+page token.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListPermissionsResponse.md b/docs/api/google/generativeai/protos/ListPermissionsResponse.md
new file mode 100644
index 000000000..8fab74f1c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListPermissionsResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListPermissionsResponse
+
+
+
+
+
+
+
+Response from ``ListPermissions`` containing a paginated list of permissions.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`permissions`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Permission]`
+
+Returned permissions.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsRequest.md b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
new file mode 100644
index 000000000..69f5f5590
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListTunedModelsRequest.md
@@ -0,0 +1,103 @@
+
+# google.generativeai.protos.ListTunedModelsRequest
+
+
+
+
+
+
+
+Request for listing TunedModels.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`page_size`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``TunedModels`` to return
+(per page). The service may return fewer tuned models.
+
+If unspecified, at most 10 tuned models will be returned.
+This method returns at most 1000 models per page, even if
+you pass a larger page_size.
+
+ |
+
+
+
+`page_token`
+
+ |
+
+
+`str`
+
+Optional. A page token, received from a previous
+``ListTunedModels`` call.
+
+Provide the ``page_token`` returned by one request as an
+argument to the next request to retrieve the next page.
+
+When paginating, all other parameters provided to
+``ListTunedModels`` must match the call that provided the
+page token.
+
+ |
+
+
+
+`filter`
+
+ |
+
+
+`str`
+
+Optional. A filter is a full text search over
+the tuned model's description and display name.
+By default, results will not include tuned
+models shared with everyone.
+
+Additional operators:
+
+ - owner:me
+ - writers:me
+ - readers:me
+ - readers:everyone
+
+Examples:
+
+ "owner:me" returns all tuned models to which
+caller has owner role "readers:me" returns all
+tuned models to which caller has reader role
+"readers:everyone" returns all tuned models that
+are shared with everyone
+
+ |
+
+
+
+
+
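+A minimal sketch of the corresponding high-level call; pagination is
+handled by the returned iterator:
+
+```
+import google.generativeai as genai
+
+# Lists tuned models visible to the caller.
+for tuned in genai.list_tuned_models(page_size=10):
+    print(tuned.name, tuned.display_name)
+```
+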
diff --git a/docs/api/google/generativeai/protos/ListTunedModelsResponse.md b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
new file mode 100644
index 000000000..0487606bd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ListTunedModelsResponse.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.protos.ListTunedModelsResponse
+
+
+
+
+
+
+
+Response from ``ListTunedModels`` containing a paginated list of Models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_models`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.TunedModel]`
+
+The returned Models.
+
+ |
+
+
+
+`next_page_token`
+
+ |
+
+
+`str`
+
+A token, which can be sent as ``page_token`` to retrieve the
+next page.
+
+If this field is omitted, there are no more pages.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/LogprobsResult.md b/docs/api/google/generativeai/protos/LogprobsResult.md
new file mode 100644
index 000000000..59ec1949a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/LogprobsResult.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.protos.LogprobsResult
+
+
+
+
+
+
+
+Logprobs Result
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`top_candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.LogprobsResult.TopCandidates]`
+
+Length = total number of decoding steps.
+
+ |
+
+
+
+`chosen_candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.LogprobsResult.Candidate]`
+
+Length = total number of decoding steps. The chosen
+candidates may or may not be in top_candidates.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class Candidate`](../../../google/generativeai/protos/LogprobsResult/Candidate.md)
+
+[`class TopCandidates`](../../../google/generativeai/protos/LogprobsResult/TopCandidates.md)
+
diff --git a/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md b/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md
new file mode 100644
index 000000000..59d88d3b1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/LogprobsResult/Candidate.md
@@ -0,0 +1,75 @@
+
+# google.generativeai.protos.LogprobsResult.Candidate
+
+
+
+
+
+
+
+Candidate for the logprobs token and score.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`token`
+
+ |
+
+
+`str`
+
+The candidate’s token string value.
+
+
+ |
+
+
+
+`token_id`
+
+ |
+
+
+`int`
+
+The candidate’s token id value.
+
+
+ |
+
+
+
+`log_probability`
+
+ |
+
+
+`float`
+
+The candidate's log probability.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md b/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md
new file mode 100644
index 000000000..9a8d5bba2
--- /dev/null
+++ b/docs/api/google/generativeai/protos/LogprobsResult/TopCandidates.md
@@ -0,0 +1,47 @@
+
+# google.generativeai.protos.LogprobsResult.TopCandidates
+
+
+
+
+
+
+
+Candidates with top log probabilities at each decoding step.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.LogprobsResult.Candidate]`
+
+Sorted by log probability in descending
+order.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Message.md b/docs/api/google/generativeai/protos/Message.md
new file mode 100644
index 000000000..5f0dd52c1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Message.md
@@ -0,0 +1,92 @@
+
+# google.generativeai.protos.Message
+
+
+
+
+
+
+
+The base unit of structured text.
+
+
+
+A ``Message`` includes an ``author`` and the ``content`` of the
+``Message``.
+
+The ``author`` is used to tag messages when they are fed to the
+model as text.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`author`
+
+ |
+
+
+`str`
+
+Optional. The author of this Message.
+
+This serves as a key for tagging
+the content of this Message when it is fed to
+the model as text.
+
+The author can be any alphanumeric string.
+
+ |
+
+
+
+`content`
+
+ |
+
+
+`str`
+
+Required. The text content of the structured ``Message``.
+
+ |
+
+
+
+`citation_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+``content`` in this ``Message``.
+
+If this ``Message`` was generated as output from the model,
+this field may be populated with attribution information for
+any text included in the ``content``. This field is used
+only on output.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/MessagePrompt.md b/docs/api/google/generativeai/protos/MessagePrompt.md
new file mode 100644
index 000000000..6da9cfd45
--- /dev/null
+++ b/docs/api/google/generativeai/protos/MessagePrompt.md
@@ -0,0 +1,109 @@
+
+# google.generativeai.protos.MessagePrompt
+
+
+
+
+
+
+
+All of the structured input text passed to the model as a prompt.
+
+
+
+A ``MessagePrompt`` contains a structured set of fields that provide
+context for the conversation, examples of user input/model output
+message pairs that prime the model to respond in different ways, and
+the conversation history or list of messages representing the
+alternating turns of the conversation between the user and the
+model.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`context`
+
+ |
+
+
+`str`
+
+Optional. Text that should be provided to the model first to
+ground the response.
+
+If not empty, this ``context`` will be given to the model
+first before the ``examples`` and ``messages``. When using a
+``context`` be sure to provide it with every request to
+maintain continuity.
+
+This field can be a description of your prompt to the model
+to help provide context and guide the responses. Examples:
+"Translate the phrase from English to French." or "Given a
+statement, classify the sentiment as happy, sad or neutral."
+
+Anything included in this field will take precedence over
+message history if the total input size exceeds the model's
+``input_token_limit`` and the input request is truncated.
+
+ |
+
+
+
+`examples`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Example]`
+
+Optional. Examples of what the model should generate.
+
+This includes both user input and the response that the
+model should emulate.
+
+These ``examples`` are treated identically to conversation
+messages except that they take precedence over the history
+in ``messages``: If the total input size exceeds the model's
+``input_token_limit`` the input will be truncated. Items
+will be dropped from ``messages`` before ``examples``.
+
+ |
+
+
+
+`messages`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Message]`
+
+Required. A snapshot of the recent conversation history
+sorted chronologically.
+
+Turns alternate between two authors.
+
+If the total input size exceeds the model's
+``input_token_limit`` the input will be truncated: The
+oldest items will be dropped from ``messages``.
+
+ |
+
+
+
+
+
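+A minimal sketch constructing the structured prompt directly from the
+proto classes; the context, example pair, and message text are
+illustrative placeholders:
+
+```
+from google.generativeai import protos
+
+prompt = protos.MessagePrompt(
+    # Given to the model first, before examples and messages.
+    context="Translate the phrase from English to French.",
+    # An input/output pair the model should emulate; examples take
+    # precedence over `messages` when input must be truncated.
+    examples=[
+        protos.Example(
+            input=protos.Message(author="user", content="Good morning"),
+            output=protos.Message(author="model", content="Bonjour"),
+        )
+    ],
+    # Alternating conversation turns; oldest are dropped first.
+    messages=[protos.Message(author="user", content="How are you?")],
+)
+```
+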
diff --git a/docs/api/google/generativeai/protos/MetadataFilter.md b/docs/api/google/generativeai/protos/MetadataFilter.md
new file mode 100644
index 000000000..f5c33833b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/MetadataFilter.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.protos.MetadataFilter
+
+
+
+
+
+
+
+User provided filter to limit retrieval based on ``Chunk`` or ``Document`` level metadata values.
+
+
+ Example (genre = drama OR genre
+= action): key = "document.custom_metadata.genre" conditions =
+[{string_value = "drama", operation = EQUAL}, {string_value =
+"action", operation = EQUAL}]
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`key`
+
+ |
+
+
+`str`
+
+Required. The key of the metadata to filter
+on.
+
+ |
+
+
+
+`conditions`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.Condition]`
+
+Required. The ``Condition``\ s for the given key that will
+trigger this filter. Multiple ``Condition``\ s are joined by
+logical ORs.
+
+ |
+
+
+
+
+
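+The genre example above as a proto construction sketch; the two
+conditions under the single key are joined by logical OR:
+
+```
+from google.generativeai import protos
+
+genre_filter = protos.MetadataFilter(
+    key="document.custom_metadata.genre",
+    conditions=[
+        protos.Condition(string_value="drama",
+                         operation=protos.Condition.Operator.EQUAL),
+        protos.Condition(string_value="action",
+                         operation=protos.Condition.Operator.EQUAL),
+    ],
+)
+```
+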
diff --git a/docs/api/google/generativeai/protos/Model.md b/docs/api/google/generativeai/protos/Model.md
new file mode 100644
index 000000000..dbc0d1c79
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Model.md
@@ -0,0 +1,240 @@
+
+# google.generativeai.protos.Model
+
+
+
+
+
+
+
+Information about a Generative Language Model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the ``Model``. Refer to
+`Model variants <https://ai.google.dev/gemini-api/docs/models/gemini>`__
+for all allowed values.
+
+Format: ``models/{model}`` with a ``{model}`` naming
+convention of:
+
+- "{base_model_id}-{version}"
+
+Examples:
+
+- ``models/gemini-1.5-flash-001``
+
+ |
+
+
+
+`base_model_id`
+
+ |
+
+
+`str`
+
+Required. The name of the base model, pass this to the
+generation request.
+
+Examples:
+
+- ``gemini-1.5-flash``
+
+ |
+
+
+
+`version`
+
+ |
+
+
+`str`
+
+Required. The version number of the model.
+
+This represents the major version (``1.0`` or ``1.5``)
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+The human-readable name of the model. E.g.
+"Gemini 1.5 Flash".
+The name can be up to 128 characters long and
+can consist of any UTF-8 characters.
+
+ |
+
+
+
+`description`
+
+ |
+
+
+`str`
+
+A short description of the model.
+
+ |
+
+
+
+`input_token_limit`
+
+ |
+
+
+`int`
+
+Maximum number of input tokens allowed for
+this model.
+
+ |
+
+
+
+`output_token_limit`
+
+ |
+
+
+`int`
+
+Maximum number of output tokens available for
+this model.
+
+ |
+
+
+
+`supported_generation_methods`
+
+ |
+
+
+`MutableSequence[str]`
+
+The model's supported generation methods.
+
+The corresponding API method names are defined as camel
+case strings, such as ``generateMessage`` and
+``generateContent``.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Controls the randomness of the output.
+
+Values can range over ``[0.0,max_temperature]``, inclusive.
+A higher value will produce responses that are more varied,
+while a value closer to ``0.0`` will typically result in
+less surprising responses from the model. This value
+specifies the default used by the backend while making
+the call to the model.
+
+
+ |
+
+
+
+`max_temperature`
+
+ |
+
+
+`float`
+
+The maximum temperature this model can use.
+
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+`float`
+
+For Nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``. This value specifies
+the default used by the backend while making the call to
+the model.
+
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+For Top-k sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. This value specifies the default used by the
+backend while making the call to the model. If empty, it
+indicates the model doesn't use top-k sampling, and
+``top_k`` isn't allowed as a generation parameter.
+
+
+ |
+
+
+
+
+
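+A short sketch reading these attributes for one model through the
+high-level SDK (assumes ``genai.configure`` has been called):
+
+```
+import google.generativeai as genai
+
+model = genai.get_model("models/gemini-1.5-flash")
+
+# Token limits and default sampling parameters described above.
+print(model.input_token_limit, model.output_token_limit)
+print(model.temperature, model.top_p, model.top_k)
+```
+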
diff --git a/docs/api/google/generativeai/protos/Part.md b/docs/api/google/generativeai/protos/Part.md
new file mode 100644
index 000000000..53497f688
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Part.md
@@ -0,0 +1,158 @@
+
+# google.generativeai.protos.Part
+
+
+
+
+
+
+
+A datatype containing media that is part of a multi-part ``Content`` message.
+
+
+
+A ``Part`` consists of data which has an associated datatype. A
+``Part`` can only contain one of the accepted types in
+``Part.data``.
+
+A ``Part`` must have a fixed IANA MIME type identifying the type and
+subtype of the media if the ``inline_data`` field is filled with raw
+bytes.
+
+This message has `oneof`_ fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`text`
+
+ |
+
+
+`str`
+
+Inline text.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`inline_data`
+
+ |
+
+
+`google.ai.generativelanguage.Blob`
+
+Inline media bytes.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`function_call`
+
+ |
+
+
+`google.ai.generativelanguage.FunctionCall`
+
+A predicted ``FunctionCall`` returned from the model that
+contains a string representing the
+FunctionDeclaration.name with the arguments and their
+values.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`function_response`
+
+ |
+
+
+`google.ai.generativelanguage.FunctionResponse`
+
+The result output of a ``FunctionCall`` that contains a
+string representing the FunctionDeclaration.name and a
+structured JSON object containing any output from the
+function; it is used as context to the model.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`file_data`
+
+ |
+
+
+`google.ai.generativelanguage.FileData`
+
+URI based data.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`executable_code`
+
+ |
+
+
+`google.ai.generativelanguage.ExecutableCode`
+
+Code generated by the model that is meant to
+be executed.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+`code_execution_result`
+
+ |
+
+
+`google.ai.generativelanguage.CodeExecutionResult`
+
+Result of executing the ``ExecutableCode``.
+
+This field is a member of `oneof`_ ``data``.
+
+ |
+
+
+
+
+
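+A minimal sketch of the ``data`` oneof behavior: assigning one member
+clears whichever member was previously set.
+
+```
+from google.generativeai import protos
+
+part = protos.Part(text="Describe this image.")
+
+# Setting inline_data clears the previously set `text` member.
+part.inline_data = protos.Blob(mime_type="image/png", data=b"<png bytes>")
+
+assert part.text == ""  # cleared by the oneof
+```
+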
diff --git a/docs/api/google/generativeai/protos/Permission.md b/docs/api/google/generativeai/protos/Permission.md
new file mode 100644
index 000000000..18cfc35bb
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission.md
@@ -0,0 +1,118 @@
+
+# google.generativeai.protos.Permission
+
+
+
+
+
+
+
+Permission resource grants user, group or the rest of the world access
+to the PaLM API resource (e.g. a tuned model, corpus).
+
+A role is a collection of permitted operations that allows users
+to perform specific actions on PaLM API resources. To make them
+available to users, groups, or service accounts, you assign
+roles. When you assign a role, you grant permissions that the
+role contains.
+
+There are three concentric roles. Each role is a superset of the
+previous role's permitted operations:
+
+- reader can use the resource (e.g. tuned model, corpus) for
+ inference
+- writer has reader's permissions and additionally can edit and
+ share
+- owner has writer's permissions and additionally can delete
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Output only. Identifier. The permission name. A unique name
+will be generated on create. Examples:
+tunedModels/{tuned_model}/permissions/{permission}
+corpora/{corpus}/permissions/{permission}
+
+ |
+
+
+
+`grantee_type`
+
+ |
+
+
+`google.ai.generativelanguage.Permission.GranteeType`
+
+Optional. Immutable. The type of the grantee.
+
+
+ |
+
+
+
+`email_address`
+
+ |
+
+
+`str`
+
+Optional. Immutable. The email address of the
+user or group to which this permission refers.
+Field is not set when permission's grantee type
+is EVERYONE.
+
+
+ |
+
+
+
+`role`
+
+ |
+
+
+`google.ai.generativelanguage.Permission.Role`
+
+Required. The role granted by this
+permission.
+
+
+ |
+
+
+
+
+
+## Child Classes
+[`class GranteeType`](../../../google/generativeai/protos/Permission/GranteeType.md)
+
+[`class Role`](../../../google/generativeai/protos/Permission/Role.md)
+
diff --git a/docs/api/google/generativeai/protos/Permission/GranteeType.md b/docs/api/google/generativeai/protos/Permission/GranteeType.md
new file mode 100644
index 000000000..807fcc4dd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission/GranteeType.md
@@ -0,0 +1,701 @@
+
+# google.generativeai.protos.Permission.GranteeType
+
+
+
+
+
+
+
+Defines types of the grantee of this permission.
+
+
+google.generativeai.protos.Permission.GranteeType(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`GRANTEE_TYPE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is unused.
+
+ |
+
+
+
+`USER`
+
+ |
+
+
+`1`
+
+Represents a user. When set, you must provide email_address
+for the user.
+
+ |
+
+
+
+`GROUP`
+
+ |
+
+
+`2`
+
+Represents a group. When set, you must provide email_address
+for the group.
+
+ |
+
+
+
+`EVERYONE`
+
+ |
+
+
+`3`
+
+Represents access to everyone. No extra
+information is required.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+EVERYONE
+
+ |
+
+
+``
+
+ |
+
+
+
+GRANTEE_TYPE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+GROUP
+
+ |
+
+
+``
+
+ |
+
+
+
+USER
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/Permission/Role.md b/docs/api/google/generativeai/protos/Permission/Role.md
new file mode 100644
index 000000000..12f34993e
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Permission/Role.md
@@ -0,0 +1,700 @@
+
+# google.generativeai.protos.Permission.Role
+
+
+
+
+
+
+
+Defines the role granted by this permission.
+
+
+google.generativeai.protos.Permission.Role(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`ROLE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is unused.
+
+ |
+
+
+
+`OWNER`
+
+ |
+
+
+`1`
+
+Owner can use, update, share and delete the
+resource.
+
+ |
+
+
+
+`WRITER`
+
+ |
+
+
+`2`
+
+Writer can use, update and share the
+resource.
+
+ |
+
+
+
+`READER`
+
+ |
+
+
+`3`
+
+Reader can use the resource.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+OWNER
+
+ |
+
+
+``
+
+ |
+
+
+
+READER
+
+ |
+
+
+``
+
+ |
+
+
+
+ROLE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+WRITER
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/PredictRequest.md b/docs/api/google/generativeai/protos/PredictRequest.md
new file mode 100644
index 000000000..c28fdcaeb
--- /dev/null
+++ b/docs/api/google/generativeai/protos/PredictRequest.md
@@ -0,0 +1,75 @@
+
+# google.generativeai.protos.PredictRequest
+
+
+
+
+
+
+
+Request message for [PredictionService.Predict][google.ai.generativelanguage.v1beta.PredictionService.Predict].
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`model`
+
+ |
+
+
+`str`
+
+Required. The name of the model for prediction. Format:
+``name=models/{model}``.
+
+ |
+
+
+
+`instances`
+
+ |
+
+
+`MutableSequence[google.protobuf.struct_pb2.Value]`
+
+Required. The instances that are the input to
+the prediction call.
+
+ |
+
+
+
+`parameters`
+
+ |
+
+
+`google.protobuf.struct_pb2.Value`
+
+Optional. The parameters that govern the
+prediction call.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/PredictResponse.md b/docs/api/google/generativeai/protos/PredictResponse.md
new file mode 100644
index 000000000..71bc979a3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/PredictResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.PredictResponse
+
+
+
+
+
+
+
+Response message for [PredictionService.Predict].
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`predictions`
+
+ |
+
+
+`MutableSequence[google.protobuf.struct_pb2.Value]`
+
+The outputs of the prediction call.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryCorpusRequest.md b/docs/api/google/generativeai/protos/QueryCorpusRequest.md
new file mode 100644
index 000000000..c7e328a35
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryCorpusRequest.md
@@ -0,0 +1,119 @@
+
+# google.generativeai.protos.QueryCorpusRequest
+
+
+
+
+
+
+
+Request for querying a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Corpus`` to query. Example:
+``corpora/my-corpus-123``
+
+ |
+
+
+
+`query`
+
+ |
+
+
+`str`
+
+Required. Query string to perform semantic
+search.
+
+ |
+
+
+
+`metadata_filters`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filter for ``Chunk`` and ``Document`` metadata.
+Each ``MetadataFilter`` object should correspond to a unique
+key. Multiple ``MetadataFilter`` objects are joined by
+logical "AND"s.
+
+Example query at document level: (year >= 2020 OR year <
+2010) AND (genre = drama OR genre = action)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"document.custom_metadata.year" conditions = [{int_value =
+2020, operation = GREATER_EQUAL}, {int_value = 2010,
+operation = LESS}]}, {key =
+"document.custom_metadata.genre" conditions = [{string_value
+= "drama", operation = EQUAL}, {string_value = "action",
+operation = EQUAL}]}]
+
+Example query at chunk level for a numeric range of values:
+(year > 2015 AND year <= 2020)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2015, operation = GREATER}]}, {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = LESS_EQUAL}]}]
+
+Note: "AND"s for the same key are only supported for numeric
+values. String values only support "OR"s for the same key.
+
+ |
+
+
+
+`results_count`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return. The
+service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum specified result count is 100.
+
+ |
+
+
+
+
+
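+A minimal sketch issuing this request through the low-level client;
+the corpus name and query string are hypothetical:
+
+```
+import google.ai.generativelanguage as glm
+
+client = glm.RetrieverServiceClient()
+
+response = client.query_corpus(
+    glm.QueryCorpusRequest(
+        name="corpora/my-corpus-123",  # hypothetical corpus
+        query="classic science fiction films",
+        results_count=5,
+    )
+)
+
+for rc in response.relevant_chunks:
+    print(rc.chunk_relevance_score, rc.chunk.data.string_value)
+```
+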
diff --git a/docs/api/google/generativeai/protos/QueryCorpusResponse.md b/docs/api/google/generativeai/protos/QueryCorpusResponse.md
new file mode 100644
index 000000000..e2b73d063
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryCorpusResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.QueryCorpusResponse
+
+
+
+
+
+
+
+Response from ``QueryCorpus`` containing a list of relevant chunks.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`relevant_chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
+
+The relevant chunks.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryDocumentRequest.md b/docs/api/google/generativeai/protos/QueryDocumentRequest.md
new file mode 100644
index 000000000..55fd996f3
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryDocumentRequest.md
@@ -0,0 +1,119 @@
+
+# google.generativeai.protos.QueryDocumentRequest
+
+
+
+
+
+
+
+Request for querying a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The name of the ``Document`` to query. Example:
+``corpora/my-corpus-123/documents/the-doc-abc``
+
+ |
+
+
+
+`query`
+
+ |
+
+
+`str`
+
+Required. Query string to perform semantic
+search.
+
+ |
+
+
+
+`results_count`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of ``Chunk``\ s to return. The
+service may return fewer ``Chunk``\ s.
+
+If unspecified, at most 10 ``Chunk``\ s will be returned.
+The maximum specified result count is 100.
+
+ |
+
+
+
+`metadata_filters`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filter for ``Chunk`` metadata. Each
+``MetadataFilter`` object should correspond to a unique key.
+Multiple ``MetadataFilter`` objects are joined by logical
+"AND"s.
+
+Note: ``Document``-level filtering is not supported for this
+request because a ``Document`` name is already specified.
+
+Example query: (year >= 2020 OR year < 2010) AND (genre =
+drama OR genre = action)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = GREATER_EQUAL}, {int_value = 2010,
+operation = LESS}]}, {key = "chunk.custom_metadata.genre"
+conditions = [{string_value = "drama", operation = EQUAL},
+{string_value = "action", operation = EQUAL}]}]
+
+Example query for a numeric range of values: (year > 2015
+AND year <= 2020)
+
+``MetadataFilter`` object list: metadata_filters = [ {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2015, operation = GREATER}]}, {key =
+"chunk.custom_metadata.year" conditions = [{int_value =
+2020, operation = LESS_EQUAL}]}]
+
+Note: "AND"s for the same key are only supported for numeric
+values. String values only support "OR"s for the same key.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/QueryDocumentResponse.md b/docs/api/google/generativeai/protos/QueryDocumentResponse.md
new file mode 100644
index 000000000..cb41e8bb4
--- /dev/null
+++ b/docs/api/google/generativeai/protos/QueryDocumentResponse.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.QueryDocumentResponse
+
+
+
+
+
+
+
+Response from ``QueryDocument`` containing a list of relevant chunks.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`relevant_chunks`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.RelevantChunk]`
+
+The returned relevant chunks.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/RelevantChunk.md b/docs/api/google/generativeai/protos/RelevantChunk.md
new file mode 100644
index 000000000..ab4a05ed1
--- /dev/null
+++ b/docs/api/google/generativeai/protos/RelevantChunk.md
@@ -0,0 +1,59 @@
+
+# google.generativeai.protos.RelevantChunk
+
+
+
+
+
+
+
+The information for a chunk relevant to a query.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`chunk_relevance_score`
+
+ |
+
+
+`float`
+
+``Chunk`` relevance to the query.
+
+ |
+
+
+
+`chunk`
+
+ |
+
+
+`google.ai.generativelanguage.Chunk`
+
+``Chunk`` associated with the query.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/RetrievalMetadata.md b/docs/api/google/generativeai/protos/RetrievalMetadata.md
new file mode 100644
index 000000000..eec4e6906
--- /dev/null
+++ b/docs/api/google/generativeai/protos/RetrievalMetadata.md
@@ -0,0 +1,52 @@
+
+# google.generativeai.protos.RetrievalMetadata
+
+
+
+
+
+
+
+Metadata related to retrieval in the grounding flow.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`google_search_dynamic_retrieval_score`
+
+ |
+
+
+`float`
+
+Optional. Score indicating how likely information from
+Google Search could help answer the prompt. The score is in
+the range [0, 1], where 0 is the least likely and 1 is the
+most likely. This score is only populated when Google Search
+grounding and dynamic retrieval are enabled. It will be
+compared to the threshold to determine whether to trigger
+Google Search.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SafetyFeedback.md b/docs/api/google/generativeai/protos/SafetyFeedback.md
new file mode 100644
index 000000000..d0f1c766a
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetyFeedback.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.protos.SafetyFeedback
+
+
+
+
+
+
+
+Safety feedback for an entire request.
+
+
+
+This field is populated if content in the input and/or response
+is blocked due to safety settings. SafetyFeedback may not exist
+for every HarmCategory. Each SafetyFeedback will return the
+safety settings used by the request as well as the lowest
+HarmProbability that should be allowed in order to return a
+result.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`rating`
+
+ |
+
+
+`google.ai.generativelanguage.SafetyRating`
+
+Safety rating evaluated from content.
+
+ |
+
+
+
+`setting`
+
+ |
+
+
+`google.ai.generativelanguage.SafetySetting`
+
+Safety settings applied to the request.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SafetyRating.md b/docs/api/google/generativeai/protos/SafetyRating.md
new file mode 100644
index 000000000..6dd3fa3a6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetyRating.md
@@ -0,0 +1,82 @@
+
+# google.generativeai.protos.SafetyRating
+
+
+
+
+
+
+
+Safety rating for a piece of content.
+
+
+
+The safety rating contains the category of harm and the harm
+probability level in that category for a piece of content.
+Content is classified for safety across a number of harm
+categories and the probability of the harm classification is
+included here.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`category`
+
+ |
+
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this rating.
+
+ |
+
+
+
+`probability`
+
+ |
+
+
+`google.ai.generativelanguage.SafetyRating.HarmProbability`
+
+Required. The probability of harm for this
+content.
+
+ |
+
+
+
+`blocked`
+
+ |
+
+
+`bool`
+
+Was this content blocked because of this
+rating?
+
+ |
+
+
+
+
+
+## Child Classes
+[`class HarmProbability`](../../../google/generativeai/types/HarmProbability.md)
+
diff --git a/docs/api/google/generativeai/protos/SafetySetting.md b/docs/api/google/generativeai/protos/SafetySetting.md
new file mode 100644
index 000000000..3a21e77d5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SafetySetting.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.protos.SafetySetting
+
+
+
+
+
+
+
+Safety setting, affecting the safety-blocking behavior.
+
+
+
+Passing a safety setting for a category changes the allowed
+probability that content is blocked.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`category`
+
+ |
+
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this setting.
+
+ |
+
+
+
+`threshold`
+
+ |
+
+
+`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
+
+Required. Controls the probability threshold
+at which harm is blocked.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class HarmBlockThreshold`](../../../google/generativeai/types/HarmBlockThreshold.md)
+
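+As a sketch, the high-level SDK accepts these settings as a mapping
+from category to threshold when constructing a model:
+
+```
+import google.generativeai as genai
+from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+model = genai.GenerativeModel(
+    "gemini-1.5-flash",
+    safety_settings={
+        # Only block harassment content rated HIGH probability.
+        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    },
+)
+```
+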
diff --git a/docs/api/google/generativeai/protos/Schema.md b/docs/api/google/generativeai/protos/Schema.md
new file mode 100644
index 000000000..732e9cd43
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Schema.md
@@ -0,0 +1,186 @@
+
+# google.generativeai.protos.Schema
+
+
+
+
+
+
+
+The ``Schema`` object allows the definition of input and output data types.
+
+
+ These types can be objects, but also primitives and arrays.
+Represents a select subset of an `OpenAPI 3.0 schema
+object <https://spec.openapis.org/oas/v3.0.3#schema>`__.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`type_`
+
+ |
+
+
+`google.ai.generativelanguage.Type`
+
+Required. Data type.
+
+ |
+
+
+
+`format_`
+
+ |
+
+
+`str`
+
+Optional. The format of the data. This is
+used only for primitive datatypes. Supported
+formats:
+
+ for NUMBER type: float, double
+ for INTEGER type: int32, int64
+ for STRING type: enum
+
+ |
+
+
+
+`description`
+
+ |
+
+
+`str`
+
+Optional. A brief description of the
+parameter. This could contain examples of use.
+Parameter description may be formatted as
+Markdown.
+
+ |
+
+
+
+`nullable`
+
+ |
+
+
+`bool`
+
+Optional. Indicates if the value may be null.
+
+ |
+
+
+
+`enum`
+
+ |
+
+
+`MutableSequence[str]`
+
+Optional. Possible values of the element of Type.STRING with
+enum format. For example we can define an Enum Direction as
+: {type:STRING, format:enum, enum:["EAST", "NORTH", "SOUTH",
+"WEST"]}
+
+ |
+
+
+
+`items`
+
+ |
+
+
+`google.ai.generativelanguage.Schema`
+
+Optional. Schema of the elements of
+Type.ARRAY.
+
+
+ |
+
+
+
+`max_items`
+
+ |
+
+
+`int`
+
+Optional. Maximum number of the elements for
+Type.ARRAY.
+
+ |
+
+
+
+`min_items`
+
+ |
+
+
+`int`
+
+Optional. Minimum number of the elements for
+Type.ARRAY.
+
+ |
+
+
+
+`properties`
+
+ |
+
+
+`MutableMapping[str, google.ai.generativelanguage.Schema]`
+
+Optional. Properties of Type.OBJECT.
+
+ |
+
+
+
+`required`
+
+ |
+
+
+`MutableSequence[str]`
+
+Optional. Required properties of Type.OBJECT.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class PropertiesEntry`](../../../google/generativeai/protos/Schema/PropertiesEntry.md)
+
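+A minimal sketch of a ``Schema`` describing an object with one required
+string property and an optional integer; note the trailing underscores
+on ``type_`` and ``format_``:
+
+```
+from google.generativeai import protos
+
+recipe = protos.Schema(
+    type_=protos.Type.OBJECT,
+    properties={
+        "recipe_name": protos.Schema(
+            type_=protos.Type.STRING,
+            description="Name of the recipe.",
+        ),
+        "rating": protos.Schema(type_=protos.Type.INTEGER, format_="int32"),
+    },
+    required=["recipe_name"],
+)
+```
+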
diff --git a/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
new file mode 100644
index 000000000..cf7b2299d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Schema/PropertiesEntry.md
@@ -0,0 +1,101 @@
+
+# google.generativeai.protos.Schema.PropertiesEntry
+
+
+
+
+
+
+
+The abstract base class for a message.
+
+
+
+
+
+
+
+Args |
+
+
+
+mapping (Union[dict, ~.Message]): A dictionary or message to be
+used to determine the values for this message.
+
+ |
+
+
+
+
+`ignore_unknown_fields`
+
+ |
+
+
+`Optional(bool)`
+
+If True, do not raise errors for
+ unknown fields. Only applied if `mapping` is a mapping type or there
+ are keyword parameters.
+
+ |
+
+
+
+`kwargs`
+
+ |
+
+
+`dict`
+
+Keys and values corresponding to the fields of the
+ message.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`key`
+
+ |
+
+
+`string key`
+
+ |
+
+
+
+`value`
+
+ |
+
+
+`Schema value`
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SearchEntryPoint.md b/docs/api/google/generativeai/protos/SearchEntryPoint.md
new file mode 100644
index 000000000..4f7799326
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SearchEntryPoint.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.SearchEntryPoint
+
+
+
+
+
+
+
+Google search entry point.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`rendered_content`
+
+ |
+
+
+`str`
+
+Optional. Web content snippet that can be
+embedded in a web page or an app webview.
+
+ |
+
+
+
+`sdk_blob`
+
+ |
+
+
+`bytes`
+
+Optional. Base64 encoded JSON representing
+array of <search term, search url> tuple.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Segment.md b/docs/api/google/generativeai/protos/Segment.md
new file mode 100644
index 000000000..aaeb2240b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Segment.md
@@ -0,0 +1,91 @@
+
+# google.generativeai.protos.Segment
+
+
+
+
+
+
+
+Segment of the content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`part_index`
+
+ |
+
+
+`int`
+
+Output only. The index of a Part object
+within its parent Content object.
+
+ |
+
+
+
+`start_index`
+
+ |
+
+
+`int`
+
+Output only. Start index in the given Part,
+measured in bytes. Offset from the start of the
+Part, inclusive, starting at zero.
+
+ |
+
+
+
+`end_index`
+
+ |
+
+
+`int`
+
+Output only. End index in the given Part,
+measured in bytes. Offset from the start of the
+Part, exclusive, starting at zero.
+
+ |
+
+
+
+`text`
+
+ |
+
+
+`str`
+
+Output only. The text corresponding to the
+segment from the response.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
new file mode 100644
index 000000000..d565a17a9
--- /dev/null
+++ b/docs/api/google/generativeai/protos/SemanticRetrieverConfig.md
@@ -0,0 +1,106 @@
+
+# google.generativeai.protos.SemanticRetrieverConfig
+
+
+
+
+
+
+
+Configuration for retrieving grounding content from a ``Corpus`` or ``Document`` created using the Semantic Retriever API.
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`source`
+
+ |
+
+
+`str`
+
+Required. Name of the resource for retrieval. Example:
+``corpora/123`` or ``corpora/123/documents/abc``.
+
+ |
+
+
+
+`query`
+
+ |
+
+
+`google.ai.generativelanguage.Content`
+
+Required. Query to use for matching ``Chunk``\ s in the
+given resource by similarity.
+
+ |
+
+
+
+`metadata_filters`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.MetadataFilter]`
+
+Optional. Filters for selecting ``Document``\ s and/or
+``Chunk``\ s from the resource.
+
+ |
+
+
+
+`max_chunks_count`
+
+ |
+
+
+`int`
+
+Optional. Maximum number of relevant ``Chunk``\ s to
+retrieve.
+
+
+ |
+
+
+
+`minimum_relevance_score`
+
+ |
+
+
+`float`
+
+Optional. Minimum relevance score for retrieved relevant
+``Chunk``\ s.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/StringList.md b/docs/api/google/generativeai/protos/StringList.md
new file mode 100644
index 000000000..0a202da5d
--- /dev/null
+++ b/docs/api/google/generativeai/protos/StringList.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.StringList
+
+
+
+
+
+
+
+User provided string values assigned to a single metadata key.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`values`
+
+ |
+
+
+`MutableSequence[str]`
+
+The string values of the metadata to store.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TaskType.md b/docs/api/google/generativeai/protos/TaskType.md
new file mode 100644
index 000000000..d01468a1f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TaskType.md
@@ -0,0 +1,802 @@
+
+# google.generativeai.protos.TaskType
+
+
+
+
+
+
+
+Type of task for which the embedding will be used.
+
+
+google.generativeai.protos.TaskType(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`TASK_TYPE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Unset value, which will default to one of the
+other enum values.
+
+ |
+
+
+
+`RETRIEVAL_QUERY`
+
+ |
+
+
+`1`
+
+Specifies the given text is a query in a
+search/retrieval setting.
+
+ |
+
+
+
+`RETRIEVAL_DOCUMENT`
+
+ |
+
+
+`2`
+
+Specifies the given text is a document from
+the corpus being searched.
+
+ |
+
+
+
+`SEMANTIC_SIMILARITY`
+
+ |
+
+
+`3`
+
+Specifies the given text will be used for
+STS.
+
+ |
+
+
+
+`CLASSIFICATION`
+
+ |
+
+
+`4`
+
+Specifies that the given text will be
+classified.
+
+ |
+
+
+
+`CLUSTERING`
+
+ |
+
+
+`5`
+
+Specifies that the embeddings will be used
+for clustering.
+
+ |
+
+
+
+`QUESTION_ANSWERING`
+
+ |
+
+
+`6`
+
+Specifies that the given text will be used
+for question answering.
+
+ |
+
+
+
+`FACT_VERIFICATION`
+
+ |
+
+
+`7`
+
+Specifies that the given text will be used
+for fact verification.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+    `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+CLASSIFICATION
+
+ |
+
+
+``
+
+ |
+
+
+
+CLUSTERING
+
+ |
+
+
+``
+
+ |
+
+
+
+FACT_VERIFICATION
+
+ |
+
+
+``
+
+ |
+
+
+
+QUESTION_ANSWERING
+
+ |
+
+
+``
+
+ |
+
+
+
+RETRIEVAL_DOCUMENT
+
+ |
+
+
+``
+
+ |
+
+
+
+RETRIEVAL_QUERY
+
+ |
+
+
+``
+
+ |
+
+
+
+SEMANTIC_SIMILARITY
+
+ |
+
+
+``
+
+ |
+
+
+
+TASK_TYPE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/TextCompletion.md b/docs/api/google/generativeai/protos/TextCompletion.md
new file mode 100644
index 000000000..09251aef6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TextCompletion.md
@@ -0,0 +1,80 @@
+
+# google.generativeai.protos.TextCompletion
+
+
+
+
+
+
+
+Output text returned from a model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`output`
+
+ |
+
+
+`str`
+
+Output only. The generated text returned from
+the model.
+
+ |
+
+
+
+`safety_ratings`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.SafetyRating]`
+
+Ratings for the safety of a response.
+
+There is at most one rating per category.
+
+ |
+
+
+
+`citation_metadata`
+
+ |
+
+
+`google.ai.generativelanguage.CitationMetadata`
+
+Output only. Citation information for model-generated
+``output`` in this ``TextCompletion``.
+
+This field may be populated with attribution information for
+any text included in the ``output``.
+
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TextPrompt.md b/docs/api/google/generativeai/protos/TextPrompt.md
new file mode 100644
index 000000000..d35842d38
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TextPrompt.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.TextPrompt
+
+
+
+
+
+
+
+Text given to the model as a prompt.
+
+
+
+The Model will use this TextPrompt to generate a text
+completion.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`text`
+
+ |
+
+
+`str`
+
+Required. The prompt text.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Tool.md b/docs/api/google/generativeai/protos/Tool.md
new file mode 100644
index 000000000..f76d4905f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Tool.md
@@ -0,0 +1,90 @@
+
+# google.generativeai.protos.Tool
+
+
+
+
+
+
+
+Tool details that the model may use to generate a response.
+
+
+
+A ``Tool`` is a piece of code that enables the system to interact
+with external systems to perform an action, or set of actions,
+outside the knowledge and scope of the model.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`function_declarations`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.FunctionDeclaration]`
+
+Optional. A list of ``FunctionDeclarations`` available to
+the model that can be used for function calling.
+
+The model or system does not execute the function. Instead
+the defined function may be returned as a
+[FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
+with arguments to the client side for execution. The model
+may decide to call a subset of these functions by populating
+[FunctionCall][google.ai.generativelanguage.v1beta.Part.function_call]
+in the response. The next conversation turn may contain a
+[FunctionResponse][google.ai.generativelanguage.v1beta.Part.function_response]
+with the
+[Content.role][google.ai.generativelanguage.v1beta.Content.role]
+"function" generation context for the next model turn.
+
+ |
+
+
+
+`google_search_retrieval`
+
+ |
+
+
+`google.ai.generativelanguage.GoogleSearchRetrieval`
+
+Optional. Retrieval tool that is powered by
+Google search.
+
+ |
+
+
+
+`code_execution`
+
+ |
+
+
+`google.ai.generativelanguage.CodeExecution`
+
+Optional. Enables the model to execute code
+as part of generation.
+
+ |
+
+
+
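+As an illustrative sketch (not part of this changeset), the Python SDK can
+build a ``Tool`` from a plain annotated function; the model name and the
+function below are assumptions:
+
+```
+import google.generativeai as genai
+
+def multiply(a: float, b: float) -> float:
+    """Returns a * b."""
+    return a * b
+
+# The SDK wraps the annotated function in a Tool containing one
+# FunctionDeclaration; the model may answer with a FunctionCall part.
+model = genai.GenerativeModel("gemini-1.5-flash", tools=[multiply])
+response = model.generate_content("What is 234551 times 325552?")
+```
+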
+
+
diff --git a/docs/api/google/generativeai/protos/ToolConfig.md b/docs/api/google/generativeai/protos/ToolConfig.md
new file mode 100644
index 000000000..a9c3d2b5b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/ToolConfig.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.ToolConfig
+
+
+
+
+
+
+
+The Tool configuration containing parameters for specifying ``Tool`` use in the request.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`function_calling_config`
+
+ |
+
+
+`google.ai.generativelanguage.FunctionCallingConfig`
+
+Optional. Function calling config.
+
+ |
+
+
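+A hedged sketch of supplying a ``ToolConfig`` as a plain dict; the mode
+string follows FunctionCallingConfig.Mode, and `model` is assumed to be a
+`GenerativeModel` created with `tools=...`:
+
+```
+# Force the model to call one of its declared functions on this turn.
+response = model.generate_content(
+    "Multiply 3 by 7.",
+    tool_config={"function_calling_config": {"mode": "any"}},
+)
+```
+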
+
+
+
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipRequest.md b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
new file mode 100644
index 000000000..8ebaf7818
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TransferOwnershipRequest.md
@@ -0,0 +1,63 @@
+
+# google.generativeai.protos.TransferOwnershipRequest
+
+
+
+
+
+
+
+Request to transfer the ownership of the tuned model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Required. The resource name of the tuned model to transfer
+ownership of.
+
+Format: ``tunedModels/my-model-id``
+
+ |
+
+
+
+`email_address`
+
+ |
+
+
+`str`
+
+Required. The email address of the user to
+whom the tuned model is being transferred.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TransferOwnershipResponse.md b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
new file mode 100644
index 000000000..225a8bef7
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TransferOwnershipResponse.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.protos.TransferOwnershipResponse
+
+
+
+
+
+
+
+Response from ``TransferOwnership``.
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TunedModel.md b/docs/api/google/generativeai/protos/TunedModel.md
new file mode 100644
index 000000000..36ca08291
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TunedModel.md
@@ -0,0 +1,255 @@
+
+# google.generativeai.protos.TunedModel
+
+
+
+
+
+
+
+A fine-tuned model created using ModelService.CreateTunedModel.
+
+
+
+This message has `oneof` fields (mutually exclusive fields).
+For each oneof, at most one member field can be set at the same time.
+Setting any member of the oneof automatically clears all other
+members.
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_model_source`
+
+ |
+
+
+`google.ai.generativelanguage.TunedModelSource`
+
+Optional. TunedModel to use as the starting
+point for training the new model.
+
+This field is a member of the `oneof` ``source_model``.
+
+ |
+
+
+
+`base_model`
+
+ |
+
+
+`str`
+
+Immutable. The name of the ``Model`` to tune. Example:
+``models/gemini-1.5-flash-001``
+
+This field is a member of the `oneof` ``source_model``.
+
+ |
+
+
+
+`name`
+
+ |
+
+
+`str`
+
+Output only. The tuned model name. A unique name will be
+generated on create. Example: ``tunedModels/az2mb0bpw6i`` If
+display_name is set on create, the id portion of the name
+will be set by concatenating the words of the display_name
+with hyphens and adding a random portion for uniqueness.
+
+Example:
+
+- display_name = ``Sentence Translator``
+- name = ``tunedModels/sentence-translator-u3b7m``
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+`str`
+
+Optional. The name to display for this model
+in user interfaces. The display name must be up
+to 40 characters including spaces.
+
+ |
+
+
+
+`description`
+
+ |
+
+
+`str`
+
+Optional. A short description of this model.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+`float`
+
+Optional. Controls the randomness of the output.
+
+Values can range over ``[0.0,1.0]``, inclusive. A value
+closer to ``1.0`` will produce responses that are more
+varied, while a value closer to ``0.0`` will typically
+result in less surprising responses from the model.
+
+If unset, this defaults to the value used by the base
+model when the tuned model was created.
+
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+`float`
+
+Optional. For Nucleus sampling.
+
+Nucleus sampling considers the smallest set of tokens whose
+probability sum is at least ``top_p``.
+
+If unset, this defaults to the value used by the base
+model when the tuned model was created.
+
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+Optional. For Top-k sampling.
+
+Top-k sampling considers the set of ``top_k`` most probable
+tokens. This value specifies the default used by the
+backend when calling the model.
+
+If unset, this defaults to the value used by the base
+model when the tuned model was created.
+
+
+ |
+
+
+
+`state`
+
+ |
+
+
+`google.ai.generativelanguage.TunedModel.State`
+
+Output only. The state of the tuned model.
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this model
+was created.
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this model
+was updated.
+
+ |
+
+
+
+`tuning_task`
+
+ |
+
+
+`google.ai.generativelanguage.TuningTask`
+
+Required. The tuning task that creates the
+tuned model.
+
+ |
+
+
+
+`reader_project_numbers`
+
+ |
+
+
+`MutableSequence[int]`
+
+Optional. List of project numbers that have
+read access to the tuned model.
+
+ |
+
+
+
+
+
+## Child Classes
+[`class State`](../../../google/generativeai/types/TunedModelState.md)
+
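+A minimal sketch of creating a ``TunedModel`` through the SDK helper; the
+source model name, id, and training data below are placeholder assumptions:
+
+```
+import google.generativeai as genai
+
+operation = genai.create_tuned_model(
+    source_model="models/gemini-1.5-flash-001-tuning",
+    training_data=[
+        {"text_input": "1", "output": "2"},
+        {"text_input": "3", "output": "4"},
+    ],
+    id="my-tuned-model",
+    epoch_count=5,
+)
+tuned_model = operation.result()  # blocks until the tuning task completes
+```
+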
diff --git a/docs/api/google/generativeai/protos/TunedModelSource.md b/docs/api/google/generativeai/protos/TunedModelSource.md
new file mode 100644
index 000000000..7a5a67e79
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TunedModelSource.md
@@ -0,0 +1,63 @@
+
+# google.generativeai.protos.TunedModelSource
+
+
+
+
+
+
+
+Tuned model as a source for training a new model.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_model`
+
+ |
+
+
+`str`
+
+Immutable. The name of the ``TunedModel`` to use as the
+starting point for training the new model. Example:
+``tunedModels/my-tuned-model``
+
+ |
+
+
+
+`base_model`
+
+ |
+
+
+`str`
+
+Output only. The name of the base ``Model`` this
+``TunedModel`` was tuned from. Example:
+``models/gemini-1.5-flash-001``
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningExample.md b/docs/api/google/generativeai/protos/TuningExample.md
new file mode 100644
index 000000000..ab5ea66cc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningExample.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.protos.TuningExample
+
+
+
+
+
+
+
+A single example for tuning.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`text_input`
+
+ |
+
+
+`str`
+
+Optional. Text model input.
+
+This field is a member of the `oneof` ``model_input``.
+
+ |
+
+
+
+`output`
+
+ |
+
+
+`str`
+
+Required. The expected model output.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningExamples.md b/docs/api/google/generativeai/protos/TuningExamples.md
new file mode 100644
index 000000000..7d0f33ed2
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningExamples.md
@@ -0,0 +1,48 @@
+
+# google.generativeai.protos.TuningExamples
+
+
+
+
+
+
+
+A set of tuning examples. Can be training or validation data.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`examples`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.TuningExample]`
+
+Required. The examples. Example input can be
+for the text or discuss (chat) service, but all examples
+in a set must be of the same type.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningSnapshot.md b/docs/api/google/generativeai/protos/TuningSnapshot.md
new file mode 100644
index 000000000..a74aea0fd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningSnapshot.md
@@ -0,0 +1,87 @@
+
+# google.generativeai.protos.TuningSnapshot
+
+
+
+
+
+
+
+Record for a single tuning step.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`step`
+
+ |
+
+
+`int`
+
+Output only. The tuning step.
+
+ |
+
+
+
+`epoch`
+
+ |
+
+
+`int`
+
+Output only. The epoch this step was part of.
+
+ |
+
+
+
+`mean_loss`
+
+ |
+
+
+`float`
+
+Output only. The mean loss of the training
+examples for this step.
+
+ |
+
+
+
+`compute_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when this metric
+was computed.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/TuningTask.md b/docs/api/google/generativeai/protos/TuningTask.md
new file mode 100644
index 000000000..aab2ccddd
--- /dev/null
+++ b/docs/api/google/generativeai/protos/TuningTask.md
@@ -0,0 +1,103 @@
+
+# google.generativeai.protos.TuningTask
+
+
+
+
+
+
+
+Tuning tasks that create tuned models.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`start_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when tuning this
+model started.
+
+ |
+
+
+
+`complete_time`
+
+ |
+
+
+`google.protobuf.timestamp_pb2.Timestamp`
+
+Output only. The timestamp when tuning this
+model completed.
+
+ |
+
+
+
+`snapshots`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.TuningSnapshot]`
+
+Output only. Metrics collected during tuning.
+
+ |
+
+
+
+`training_data`
+
+ |
+
+
+`google.ai.generativelanguage.Dataset`
+
+Required. Input only. Immutable. The model
+training data.
+
+ |
+
+
+
+`hyperparameters`
+
+ |
+
+
+`google.ai.generativelanguage.Hyperparameters`
+
+Immutable. Hyperparameters controlling the
+tuning process. If not provided, default values
+will be used.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/Type.md b/docs/api/google/generativeai/protos/Type.md
new file mode 100644
index 000000000..6f2263dc5
--- /dev/null
+++ b/docs/api/google/generativeai/protos/Type.md
@@ -0,0 +1,770 @@
+
+# google.generativeai.protos.Type
+
+
+
+
+
+
+
+Type contains the list of OpenAPI data types as defined by https://spec.openapis.org/oas/v3.0.3#data-types
+
+
+google.generativeai.protos.Type(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`TYPE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Not specified, should not be used.
+
+ |
+
+
+
+`STRING`
+
+ |
+
+
+`1`
+
+String type.
+
+ |
+
+
+
+`NUMBER`
+
+ |
+
+
+`2`
+
+Number type.
+
+ |
+
+
+
+`INTEGER`
+
+ |
+
+
+`3`
+
+Integer type.
+
+ |
+
+
+
+`BOOLEAN`
+
+ |
+
+
+`4`
+
+Boolean type.
+
+ |
+
+
+
+`ARRAY`
+
+ |
+
+
+`5`
+
+Array type.
+
+ |
+
+
+
+`OBJECT`
+
+ |
+
+
+`6`
+
+Object type.
+
+ |
+
+
+
+
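+These values are typically used when declaring function parameters or a
+response schema. A hedged sketch (the property names are illustrative):
+
+```
+from google.generativeai import protos
+
+# An OBJECT schema with a required STRING property and an INTEGER property.
+schema = protos.Schema(
+    type=protos.Type.OBJECT,
+    properties={
+        "name": protos.Schema(type=protos.Type.STRING),
+        "age": protos.Schema(type=protos.Type.INTEGER),
+    },
+    required=["name"],
+)
+```
+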
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+    bytes, byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+    `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+    `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+ARRAY
+
+ |
+
+
+``
+
+ |
+
+
+
+BOOLEAN
+
+ |
+
+
+``
+
+ |
+
+
+
+INTEGER
+
+ |
+
+
+``
+
+ |
+
+
+
+NUMBER
+
+ |
+
+
+``
+
+ |
+
+
+
+OBJECT
+
+ |
+
+
+``
+
+ |
+
+
+
+STRING
+
+ |
+
+
+``
+
+ |
+
+
+
+TYPE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
new file mode 100644
index 000000000..f4cd4e31c
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateCachedContentRequest.md
@@ -0,0 +1,59 @@
+
+# google.generativeai.protos.UpdateCachedContentRequest
+
+
+
+
+
+
+
+Request to update CachedContent.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`cached_content`
+
+ |
+
+
+`google.ai.generativelanguage.CachedContent`
+
+Required. The content cache entry to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+The list of fields to update.
+
+ |
+
+
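+This request is normally issued for you by the caching helper; a hedged
+sketch (the cache name is a placeholder):
+
+```
+import datetime
+from google.generativeai import caching
+
+cache = caching.CachedContent.get("cachedContents/example-id")
+# Under the hood this sends an UpdateCachedContentRequest with an
+# update_mask covering the TTL.
+cache.update(ttl=datetime.timedelta(hours=2))
+```
+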
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateChunkRequest.md b/docs/api/google/generativeai/protos/UpdateChunkRequest.md
new file mode 100644
index 000000000..9dc6d4cbc
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateChunkRequest.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.UpdateChunkRequest
+
+
+
+
+
+
+
+Request to update a ``Chunk``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`chunk`
+
+ |
+
+
+`google.ai.generativelanguage.Chunk`
+
+Required. The ``Chunk`` to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``custom_metadata`` and ``data``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateCorpusRequest.md b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
new file mode 100644
index 000000000..a08ba19e6
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateCorpusRequest.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.UpdateCorpusRequest
+
+
+
+
+
+
+
+Request to update a ``Corpus``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`corpus`
+
+ |
+
+
+`google.ai.generativelanguage.Corpus`
+
+Required. The ``Corpus`` to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``display_name``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateDocumentRequest.md b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
new file mode 100644
index 000000000..9f52e983b
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateDocumentRequest.md
@@ -0,0 +1,60 @@
+
+# google.generativeai.protos.UpdateDocumentRequest
+
+
+
+
+
+
+
+Request to update a ``Document``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`document`
+
+ |
+
+
+`google.ai.generativelanguage.Document`
+
+Required. The ``Document`` to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Currently, this only
+supports updating ``display_name`` and ``custom_metadata``.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdatePermissionRequest.md b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
new file mode 100644
index 000000000..e6e64b15f
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdatePermissionRequest.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.protos.UpdatePermissionRequest
+
+
+
+
+
+
+
+Request to update the ``Permission``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`permission`
+
+ |
+
+
+`google.ai.generativelanguage.Permission`
+
+Required. The permission to update.
+
+The permission's ``name`` field is used to identify the
+permission to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update. Accepted ones:
+
+- role (Permission.role field)
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
new file mode 100644
index 000000000..f30c5b7bb
--- /dev/null
+++ b/docs/api/google/generativeai/protos/UpdateTunedModelRequest.md
@@ -0,0 +1,59 @@
+
+# google.generativeai.protos.UpdateTunedModelRequest
+
+
+
+
+
+
+
+Request to update a TunedModel.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`tuned_model`
+
+ |
+
+
+`google.ai.generativelanguage.TunedModel`
+
+Required. The tuned model to update.
+
+ |
+
+
+
+`update_mask`
+
+ |
+
+
+`google.protobuf.field_mask_pb2.FieldMask`
+
+Required. The list of fields to update.
+
+ |
+
+
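+A sketch of building this request directly with a field mask; the names are
+placeholders, and most callers use the higher-level model-update helpers
+instead:
+
+```
+from google.generativeai import protos
+
+request = protos.UpdateTunedModelRequest(
+    tuned_model=protos.TunedModel(
+        name="tunedModels/my-model-id",
+        display_name="A new display name",
+    ),
+    update_mask={"paths": ["display_name"]},
+)
+```
+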
+
+
+
diff --git a/docs/api/google/generativeai/protos/VideoMetadata.md b/docs/api/google/generativeai/protos/VideoMetadata.md
new file mode 100644
index 000000000..20dfda9cf
--- /dev/null
+++ b/docs/api/google/generativeai/protos/VideoMetadata.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.protos.VideoMetadata
+
+
+
+
+
+
+
+Metadata for a video ``File``.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`video_duration`
+
+ |
+
+
+`google.protobuf.duration_pb2.Duration`
+
+Duration of the video.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types.md b/docs/api/google/generativeai/types.md
new file mode 100644
index 000000000..a9e502516
--- /dev/null
+++ b/docs/api/google/generativeai/types.md
@@ -0,0 +1,155 @@
+
+# Module: google.generativeai.types
+
+
+
+
+
+
+
+A collection of type definitions used throughout the library.
+
+
+
+## Classes
+
+[`class AsyncGenerateContentResponse`](../../google/generativeai/types/AsyncGenerateContentResponse.md): This is the async version of `genai.GenerateContentResponse`.
+
+[`class BlobDict`](../../google/generativeai/types/BlobDict.md)
+
+[`class BlockedPromptException`](../../google/generativeai/types/BlockedPromptException.md): Common base class for all non-exit exceptions.
+
+[`class BlockedReason`](../../google/generativeai/types/BlockedReason.md): A list of reasons why content may have been blocked.
+
+[`class BrokenResponseError`](../../google/generativeai/types/BrokenResponseError.md): Common base class for all non-exit exceptions.
+
+[`class CallableFunctionDeclaration`](../../google/generativeai/types/CallableFunctionDeclaration.md): An extension of `FunctionDeclaration` that can be built from a python function, and is callable.
+
+[`class CitationMetadataDict`](../../google/generativeai/types/CitationMetadataDict.md): A collection of source attributions for a piece of content.
+
+[`class CitationSourceDict`](../../google/generativeai/types/CitationSourceDict.md): A citation to a source for a portion of a specific response.
+
+[`class ContentDict`](../../google/generativeai/types/ContentDict.md)
+
+[`class ContentFilterDict`](../../google/generativeai/types/ContentFilterDict.md): Content filtering metadata associated with processing a single request.
+
+[`class File`](../../google/generativeai/types/File.md)
+
+[`class FileDataDict`](../../google/generativeai/types/FileDataDict.md)
+
+[`class FunctionDeclaration`](../../google/generativeai/types/FunctionDeclaration.md)
+
+[`class FunctionLibrary`](../../google/generativeai/types/FunctionLibrary.md): A container for a set of `Tool` objects that manages lookup and execution of their functions.
+
+[`class GenerateContentResponse`](../../google/generativeai/types/GenerateContentResponse.md): Instances of this class manage the response of the `generate_content` method.
+
+[`class GenerationConfig`](../../google/generativeai/types/GenerationConfig.md): A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content.
+
+[`class GenerationConfigDict`](../../google/generativeai/types/GenerationConfigDict.md)
+
+[`class HarmBlockThreshold`](../../google/generativeai/types/HarmBlockThreshold.md): Block at and beyond a specified harm probability.
+
+[`class HarmCategory`](../../google/generativeai/types/HarmCategory.md): Harm categories supported by the Gemini family of models.
+
+[`class HarmProbability`](../../google/generativeai/types/HarmProbability.md): The probability that a piece of content is harmful.
+
+[`class IncompleteIterationError`](../../google/generativeai/types/IncompleteIterationError.md): Common base class for all non-exit exceptions.
+
+[`class Model`](../../google/generativeai/types/Model.md): A dataclass representation of a protos.Model.
+
+[`class PartDict`](../../google/generativeai/types/PartDict.md)
+
+[`class Permission`](../../google/generativeai/types/Permission.md): A permission to access a resource.
+
+[`class Permissions`](../../google/generativeai/types/Permissions.md)
+
+[`class RequestOptions`](../../google/generativeai/types/RequestOptions.md): Request options
+
+[`class SafetyFeedbackDict`](../../google/generativeai/types/SafetyFeedbackDict.md): Safety feedback for an entire request.
+
+[`class SafetyRatingDict`](../../google/generativeai/types/SafetyRatingDict.md): Safety rating for a piece of content.
+
+[`class SafetySettingDict`](../../google/generativeai/types/SafetySettingDict.md): Safety setting, affecting the safety-blocking behavior.
+
+[`class Status`](../../google/generativeai/types/Status.md): A ProtocolMessage
+
+[`class StopCandidateException`](../../google/generativeai/types/StopCandidateException.md): Common base class for all non-exit exceptions.
+
+[`class Tool`](../../google/generativeai/types/Tool.md): A wrapper for protos.Tool; contains a collection of related `FunctionDeclaration` objects, a protos.CodeExecution object, and a protos.GoogleSearchRetrieval object.
+
+[`class ToolDict`](../../google/generativeai/types/ToolDict.md)
+
+[`class TunedModel`](../../google/generativeai/types/TunedModel.md): A dataclass representation of a protos.TunedModel.
+
+[`class TunedModelState`](../../google/generativeai/types/TunedModelState.md): The state of the tuned model.
+
+## Functions
+
+[`TypedDict(...)`](../../google/generativeai/types/TypedDict.md): A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+[`get_default_file_client(...)`](../../google/generativeai/types/get_default_file_client.md)
+
+[`to_file_data(...)`](../../google/generativeai/types/to_file_data.md)
+
+## Type Aliases
+
+[`AnyModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
+
+[`BaseModelNameOptions`](../../google/generativeai/types/BaseModelNameOptions.md)
+
+[`BlobType`](../../google/generativeai/types/BlobType.md)
+
+[`ContentType`](../../google/generativeai/types/ContentType.md)
+
+[`ContentsType`](../../google/generativeai/types/ContentsType.md)
+
+[`FileDataType`](../../google/generativeai/types/FileDataType.md)
+
+[`FunctionDeclarationType`](../../google/generativeai/types/FunctionDeclarationType.md)
+
+[`FunctionLibraryType`](../../google/generativeai/types/FunctionLibraryType.md)
+
+[`GenerationConfigType`](../../google/generativeai/types/GenerationConfigType.md)
+
+[`ModelNameOptions`](../../google/generativeai/types/AnyModelNameOptions.md)
+
+[`ModelsIterable`](../../google/generativeai/types/ModelsIterable.md)
+
+[`PartType`](../../google/generativeai/types/PartType.md)
+
+[`RequestOptionsType`](../../google/generativeai/types/RequestOptionsType.md)
+
+[`StrictContentType`](../../google/generativeai/types/StrictContentType.md)
+
+[`ToolsType`](../../google/generativeai/types/ToolsType.md)
+
+[`TunedModelNameOptions`](../../google/generativeai/types/TunedModelNameOptions.md)
+
+
+
+
+
+
+Other Members |
+
+
+
+
+annotations
+
+ |
+
+
+Instance of `__future__._Feature`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/AnyModelNameOptions.md b/docs/api/google/generativeai/types/AnyModelNameOptions.md
new file mode 100644
index 000000000..e77c9f771
--- /dev/null
+++ b/docs/api/google/generativeai/types/AnyModelNameOptions.md
@@ -0,0 +1,23 @@
+
+# google.generativeai.types.AnyModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+AnyModelNameOptions = Union[
+    str,
+    google.generativeai.types.Model,
+    google.generativeai.protos.Model,
+    google.generativeai.types.TunedModel,
+    google.generativeai.protos.TunedModel
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
new file mode 100644
index 000000000..e67806a80
--- /dev/null
+++ b/docs/api/google/generativeai/types/AsyncGenerateContentResponse.md
@@ -0,0 +1,161 @@
+
+# google.generativeai.types.AsyncGenerateContentResponse
+
+
+
+
+
+
+
+This is the async version of `genai.GenerateContentResponse`.
+
+
+google.generativeai.types.AsyncGenerateContentResponse(
+ done: bool,
+    iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.GenerateContentResponse]),
+ result: protos.GenerateContentResponse,
+ chunks: (Iterable[protos.GenerateContentResponse] | None) = None
+)
+
+
+
+
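+A minimal streaming sketch, assuming an API key is already configured and
+using a placeholder model name:
+
+```
+import asyncio
+import google.generativeai as genai
+
+async def main():
+    model = genai.GenerativeModel("gemini-1.5-flash")
+    # generate_content_async returns an AsyncGenerateContentResponse.
+    response = await model.generate_content_async("Tell me a story.", stream=True)
+    async for chunk in response:
+        print(chunk.text)
+
+asyncio.run(main())
+```
+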
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+The list of candidate responses.
+
+ |
+
+
+
+`parts`
+
+ |
+
+
+A quick accessor equivalent to `self.candidates[0].content.parts`
+
+ |
+
+
+
+`prompt_feedback`
+
+ |
+
+
+
+
+ |
+
+
+
+`text`
+
+ |
+
+
+A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
+
+ |
+
+
+
+`usage_metadata`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_aiterator
+
+View source
+
+
+from_aiterator(
+ iterator
+)
+
+
+
+
+
+from_response
+
+View source
+
+
+@classmethod
+from_response(
+ response: protos.GenerateContentResponse
+)
+
+
+
+
+
+resolve
+
+View source
+
+
+resolve()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict()
+
+
+Returns the result as a JSON-compatible dict.
+
+Note: This doesn't capture the iterator state when streaming; it only captures the accumulated
+`GenerateContentResponse` fields.
+
+```
+>>> import json
+>>> response = model.generate_content('Hello?')
+>>> json.dumps(response.to_dict())
+```
+
+
+
diff --git a/docs/api/google/generativeai/types/BaseModelNameOptions.md b/docs/api/google/generativeai/types/BaseModelNameOptions.md
new file mode 100644
index 000000000..0df2d0a32
--- /dev/null
+++ b/docs/api/google/generativeai/types/BaseModelNameOptions.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.BaseModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+BaseModelNameOptions = Union[
+    str,
+    google.generativeai.types.Model,
+    google.generativeai.protos.Model
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlobDict.md b/docs/api/google/generativeai/types/BlobDict.md
new file mode 100644
index 000000000..a2bc987e7
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlobDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.BlobDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlobType.md b/docs/api/google/generativeai/types/BlobType.md
new file mode 100644
index 000000000..213c89d53
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlobType.md
@@ -0,0 +1,22 @@
+
+# google.generativeai.types.BlobType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+BlobType = Union[
+    google.generativeai.protos.Blob,
+    google.generativeai.types.BlobDict,
+    PIL.Image.Image,
+    IPython.core.display.Image
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlockedPromptException.md b/docs/api/google/generativeai/types/BlockedPromptException.md
new file mode 100644
index 000000000..18c1628d3
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlockedPromptException.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.BlockedPromptException
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/BlockedReason.md b/docs/api/google/generativeai/types/BlockedReason.md
new file mode 100644
index 000000000..8c4f2d822
--- /dev/null
+++ b/docs/api/google/generativeai/types/BlockedReason.md
@@ -0,0 +1,683 @@
+
+# google.generativeai.types.BlockedReason
+
+
+
+
+
+
+
+A list of reasons why content may have been blocked.
+
+
+ View aliases
+
+Main aliases
+
+`google.generativeai.protos.ContentFilter.BlockedReason`
+
+
+
+
+google.generativeai.types.BlockedReason(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`BLOCKED_REASON_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+A blocked reason was not specified.
+
+ |
+
+
+
+`SAFETY`
+
+ |
+
+
+`1`
+
+Content was blocked by safety settings.
+
+ |
+
+
+
+`OTHER`
+
+ |
+
+
+`2`
+
+Content was blocked, but the reason is
+uncategorized.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+    bytes, byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+    `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+    `sys.byteorder` as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+BLOCKED_REASON_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+OTHER
+
+ |
+
+
+``
+
+ |
+
+
+
+SAFETY
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/BrokenResponseError.md b/docs/api/google/generativeai/types/BrokenResponseError.md
new file mode 100644
index 000000000..2a126a482
--- /dev/null
+++ b/docs/api/google/generativeai/types/BrokenResponseError.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.BrokenResponseError
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/CallableFunctionDeclaration.md b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
new file mode 100644
index 000000000..346aad9d5
--- /dev/null
+++ b/docs/api/google/generativeai/types/CallableFunctionDeclaration.md
@@ -0,0 +1,145 @@
+
+# google.generativeai.types.CallableFunctionDeclaration
+
+
+
+
+
+
+
+An extension of `FunctionDeclaration` that can be built from a python function, and is callable.
+
+Inherits From: [`FunctionDeclaration`](../../../google/generativeai/types/FunctionDeclaration.md)
+
+
+google.generativeai.types.CallableFunctionDeclaration(
+ *,
+ name: str,
+ description: str,
+ parameters: (dict[str, Any] | None) = None,
+ function: Callable[..., Any]
+)
+
+
+
+
+
+
+Note: The python function must have type annotations.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`description`
+
+ |
+
+
+
+
+ |
+
+
+
+`name`
+
+ |
+
+
+
+
+ |
+
+
+
+`parameters`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_function
+
+View source
+
+
+@staticmethod
+from_function(
+ function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
+)
+
+
+Builds a `CallableFunctionDeclaration` from a python function.
+
+The function should have type annotations.
+
+This method is able to generate the schema for arguments annotated with types:
+
+`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
+
+This method does not yet build a schema for `TypedDict`, which would allow you to specify the dictionary
+contents, but you can build these manually.
+
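+A short sketch of building one from an annotated function (the function
+below is a placeholder):
+
+```
+from google.generativeai.types import CallableFunctionDeclaration
+
+def add(a: int, b: int) -> int:
+    """Returns the sum of two integers."""
+    return a + b
+
+# The parameter schema for `a` and `b` is derived from the annotations.
+add_declaration = CallableFunctionDeclaration.from_function(add)
+```
+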
+from_proto
+
+View source
+
+
+@classmethod
+from_proto(
+ proto
+) -> FunctionDeclaration
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.FunctionDeclaration
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> protos.FunctionResponse
+
+
+Call self as a function.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/CitationMetadataDict.md b/docs/api/google/generativeai/types/CitationMetadataDict.md
new file mode 100644
index 000000000..ea848145d
--- /dev/null
+++ b/docs/api/google/generativeai/types/CitationMetadataDict.md
@@ -0,0 +1,46 @@
+
+# google.generativeai.types.CitationMetadataDict
+
+
+
+
+
+
+
+A collection of source attributions for a piece of content.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`citation_sources`
+
+ |
+
+
+`MutableSequence[google.ai.generativelanguage.CitationSource]`
+
+Citations to sources for a specific response.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/CitationSourceDict.md b/docs/api/google/generativeai/types/CitationSourceDict.md
new file mode 100644
index 000000000..71bb9d35d
--- /dev/null
+++ b/docs/api/google/generativeai/types/CitationSourceDict.md
@@ -0,0 +1,94 @@
+
+# google.generativeai.types.CitationSourceDict
+
+
+
+
+
+
+
+A citation to a source for a portion of a specific response.
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`start_index`
+
+ |
+
+
+`int`
+
+Optional. Start of segment of the response
+that is attributed to this source.
+
+Index indicates the start of the segment,
+measured in bytes.
+
+ |
+
+
+
+`end_index`
+
+ |
+
+
+`int`
+
+Optional. End of the attributed segment,
+exclusive.
+
+ |
+
+
+
+`uri`
+
+ |
+
+
+`str`
+
+Optional. URI that is attributed as a source
+for a portion of the text.
+
+ |
+
+
+
+`license_`
+
+ |
+
+
+`str`
+
+Optional. License for the GitHub project that
+is attributed as a source for this segment.
+
+License info is required for code citations.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentDict.md b/docs/api/google/generativeai/types/ContentDict.md
new file mode 100644
index 000000000..19da67166
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.ContentDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentFilterDict.md b/docs/api/google/generativeai/types/ContentFilterDict.md
new file mode 100644
index 000000000..fa1da46bb
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentFilterDict.md
@@ -0,0 +1,64 @@
+
+# google.generativeai.types.ContentFilterDict
+
+
+
+
+
+
+
+Content filtering metadata associated with processing a single request.
+
+
+ContentFilter contains a reason and an optional supporting
+string. The reason may be unspecified.
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`reason`
+
+ |
+
+
+`google.ai.generativelanguage.ContentFilter.BlockedReason`
+
+The reason content was blocked during request
+processing.
+
+ |
+
+
+
+`message`
+
+ |
+
+
+`str`
+
+A string that describes the filtering
+behavior in more detail.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentType.md b/docs/api/google/generativeai/types/ContentType.md
new file mode 100644
index 000000000..a9967a899
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentType.md
@@ -0,0 +1,34 @@
+
+# google.generativeai.types.ContentType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ContentType = Union[
+    google.generativeai.protos.Content,
+    google.generativeai.types.ContentDict,
+    Iterable[google.generativeai.types.PartType],
+    google.generativeai.protos.Part,
+    google.generativeai.types.PartDict,
+    google.generativeai.protos.Blob,
+    google.generativeai.types.BlobDict,
+    PIL.Image.Image,
+    IPython.core.display.Image,
+    str,
+    google.generativeai.protos.FunctionCall,
+    google.generativeai.protos.FunctionResponse,
+    google.generativeai.types.FileDataDict,
+    google.generativeai.protos.FileData,
+    google.generativeai.protos.File,
+    google.generativeai.types.File
+]
+
+
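+Anything matching this union can be passed as content; a hedged sketch
+mixing a string part with a PIL image (the file and model names are
+assumptions):
+
+```
+import google.generativeai as genai
+import PIL.Image
+
+model = genai.GenerativeModel("gemini-1.5-flash")
+img = PIL.Image.open("photo.jpg")
+# A list of parts: a str and a PIL image are both valid PartType values.
+response = model.generate_content(["Describe this image:", img])
+print(response.text)
+```
+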
+
+
+
diff --git a/docs/api/google/generativeai/types/ContentsType.md b/docs/api/google/generativeai/types/ContentsType.md
new file mode 100644
index 000000000..dcfea9aeb
--- /dev/null
+++ b/docs/api/google/generativeai/types/ContentsType.md
@@ -0,0 +1,36 @@
+
+# google.generativeai.types.ContentsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ContentsType = Union[
+    google.generativeai.protos.Content,
+    google.generativeai.types.ContentDict,
+    Iterable[google.generativeai.types.PartType],
+    google.generativeai.protos.Part,
+    google.generativeai.types.PartDict,
+    google.generativeai.protos.Blob,
+    google.generativeai.types.BlobDict,
+    PIL.Image.Image,
+    IPython.core.display.Image,
+    str,
+    google.generativeai.protos.FunctionCall,
+    google.generativeai.protos.FunctionResponse,
+    google.generativeai.types.FileDataDict,
+    google.generativeai.protos.FileData,
+    google.generativeai.protos.File,
+    google.generativeai.types.File,
+    Iterable[google.generativeai.types.StrictContentType],
+    NoneType
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/File.md b/docs/api/google/generativeai/types/File.md
new file mode 100644
index 000000000..627659d71
--- /dev/null
+++ b/docs/api/google/generativeai/types/File.md
@@ -0,0 +1,210 @@
+
+# google.generativeai.types.File
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.File(
+ proto: (protos.File | File | dict)
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`create_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+
+
+ |
+
+
+
+`error`
+
+ |
+
+
+
+
+ |
+
+
+
+`expiration_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`mime_type`
+
+ |
+
+
+
+
+ |
+
+
+
+`name`
+
+ |
+
+
+
+
+ |
+
+
+
+`sha256_hash`
+
+ |
+
+
+
+
+ |
+
+
+
+`size_bytes`
+
+ |
+
+
+
+
+ |
+
+
+
+`state`
+
+ |
+
+
+
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+
+
+ |
+
+
+
+`uri`
+
+ |
+
+
+
+
+ |
+
+
+
+`video_metadata`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+delete
+
+View source
+
+
+delete()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict() -> dict[str, Any]
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.File
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FileDataDict.md b/docs/api/google/generativeai/types/FileDataDict.md
new file mode 100644
index 000000000..39ba63522
--- /dev/null
+++ b/docs/api/google/generativeai/types/FileDataDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.FileDataDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FileDataType.md b/docs/api/google/generativeai/types/FileDataType.md
new file mode 100644
index 000000000..12f0f5d29
--- /dev/null
+++ b/docs/api/google/generativeai/types/FileDataType.md
@@ -0,0 +1,22 @@
+
+# google.generativeai.types.FileDataType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FileDataType = Union[
+    google.generativeai.types.FileDataDict,
+    google.generativeai.protos.FileData,
+    google.generativeai.protos.File,
+    google.generativeai.types.File
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionDeclaration.md b/docs/api/google/generativeai/types/FunctionDeclaration.md
new file mode 100644
index 000000000..768502bdc
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionDeclaration.md
@@ -0,0 +1,125 @@
+
+# google.generativeai.types.FunctionDeclaration
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.FunctionDeclaration(
+ *, name: str, description: str, parameters: (dict[str, Any] | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`description`
+
+ |
+
+
+
+
+ |
+
+
+
+`name`
+
+ |
+
+
+
+
+ |
+
+
+
+`parameters`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_function
+
+View source
+
+
+@staticmethod
+from_function(
+ function: Callable[..., Any], descriptions: (dict[str, str] | None) = None
+)
+
+
+Builds a `CallableFunctionDeclaration` from a python function.
+
+The function should have type annotations.
+
+This method is able to generate the schema for arguments annotated with types:
+
+`AllowedTypes = float | int | str | list[AllowedTypes] | dict`
+
+This method does not yet build a schema for `TypedDict`, which would allow you to specify the dictionary
+contents, but you can build these manually.
+
+from_proto
+
+View source
+
+
+@classmethod
+from_proto(
+ proto
+) -> FunctionDeclaration
+
+
+
+
+
+to_proto
+
+View source
+
+
+to_proto() -> protos.FunctionDeclaration
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionDeclarationType.md b/docs/api/google/generativeai/types/FunctionDeclarationType.md
new file mode 100644
index 000000000..4bce40d90
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionDeclarationType.md
@@ -0,0 +1,22 @@
+
+# google.generativeai.types.FunctionDeclarationType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FunctionDeclarationType = Union[
+    google.generativeai.types.FunctionDeclaration,
+    google.generativeai.protos.FunctionDeclaration,
+    dict[str, Any],
+    Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionLibrary.md b/docs/api/google/generativeai/types/FunctionLibrary.md
new file mode 100644
index 000000000..ab6e978a6
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionLibrary.md
@@ -0,0 +1,70 @@
+
+# google.generativeai.types.FunctionLibrary
+
+
+
+
+
+
+
+A container for a set of `Tool` objects that manages lookup and execution of their functions.
+
+
+google.generativeai.types.FunctionLibrary(
+ tools: Iterable[ToolType]
+)
+
+
+
+
+
+
+
+## Methods
+
+to_proto
+
+View source
+
+
+to_proto()
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> (protos.Part | None)
+
+
+Call self as a function.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ name: (str | protos.FunctionCall)
+) -> (FunctionDeclaration | protos.FunctionDeclaration)
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/FunctionLibraryType.md b/docs/api/google/generativeai/types/FunctionLibraryType.md
new file mode 100644
index 000000000..a8b931f95
--- /dev/null
+++ b/docs/api/google/generativeai/types/FunctionLibraryType.md
@@ -0,0 +1,29 @@
+
+# google.generativeai.types.FunctionLibraryType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+FunctionLibraryType = Union[
+    google.generativeai.types.FunctionLibrary,
+    Iterable[Union[str, google.generativeai.types.Tool, google.generativeai.protos.Tool, google.generativeai.types.ToolDict, Iterable[google.generativeai.types.FunctionDeclarationType], google.generativeai.types.FunctionDeclaration, google.generativeai.protos.FunctionDeclaration, dict[str, Any], Callable[..., Any]]],
+    str,
+    google.generativeai.types.Tool,
+    google.generativeai.protos.Tool,
+    google.generativeai.types.ToolDict,
+    Iterable[google.generativeai.types.FunctionDeclarationType],
+    google.generativeai.types.FunctionDeclaration,
+    google.generativeai.protos.FunctionDeclaration,
+    dict[str, Any],
+    Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerateContentResponse.md b/docs/api/google/generativeai/types/GenerateContentResponse.md
new file mode 100644
index 000000000..eee32531b
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerateContentResponse.md
@@ -0,0 +1,193 @@
+
+# google.generativeai.types.GenerateContentResponse
+
+
+
+
+
+
+
+Instances of this class manage the response of the `generate_content` method.
+
+
+google.generativeai.types.GenerateContentResponse(
+ done: bool,
+ iterator: (None | Iterable[protos.GenerateContentResponse] | AsyncIterable[protos.GenerateContentResponse]),
+ result: protos.GenerateContentResponse,
+ chunks: (Iterable[protos.GenerateContentResponse] | None) = None
+)
+
+
+
+
+
+
+These are returned by GenerativeModel.generate_content and ChatSession.send_message.
+This object is based on the low level protos.GenerateContentResponse class which just has `prompt_feedback`
+and `candidates` attributes. This class adds several quick accessors for common use cases.
+
+The same object type is returned for both `stream=True/False`.
+
+### Streaming
+
+When you pass `stream=True` to GenerativeModel.generate_content or ChatSession.send_message,
+iterate over this object to receive chunks of the response:
+
+```
+response = model.generate_content(..., stream=True)
+for chunk in response:
+ print(chunk.text)
+```
+
+GenerateContentResponse.prompt_feedback is available immediately but
+GenerateContentResponse.candidates, and all the attributes derived from them (`.text`, `.parts`),
+are only available after the iteration is complete.
+
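+For example (a sketch; `model` is an existing `GenerativeModel`), you can
+exhaust the stream with `resolve` before reading the accumulated text:
+
+```
+response = model.generate_content('Tell me a story.', stream=True)
+response.resolve()  # consume any remaining chunks
+print(response.text)  # safe now that iteration is complete
+```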
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidates`
+
+ |
+
+
+The list of candidate responses.
+
+ |
+
+
+
+`parts`
+
+ |
+
+
+A quick accessor equivalent to `self.candidates[0].content.parts`
+
+ |
+
+
+
+`prompt_feedback`
+
+ |
+
+
+
+
+ |
+
+
+
+`text`
+
+ |
+
+
+A quick accessor equivalent to `self.candidates[0].content.parts[0].text`
+
+ |
+
+
+
+`usage_metadata`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+from_iterator
+
+View source
+
+
+@classmethod
+from_iterator(
+ iterator: Iterable[protos.GenerateContentResponse]
+)
+
+
+
+
+
+from_response
+
+View source
+
+
+@classmethod
+from_response(
+ response: protos.GenerateContentResponse
+)
+
+
+
+
+
+resolve
+
+View source
+
+
+resolve()
+
+
+
+
+
+to_dict
+
+View source
+
+
+to_dict()
+
+
+Returns the result as a JSON-compatible dict.
+
+Note: This doesn't capture the iterator state when streaming, it only captures the accumulated
+`GenerateContentResponse` fields.
+
+```
+>>> import json
+>>> response = model.generate_content('Hello?')
+>>> json.dumps(response.to_dict())
+```
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerationConfig.md b/docs/api/google/generativeai/types/GenerationConfig.md
new file mode 100644
index 000000000..429a8a639
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfig.md
@@ -0,0 +1,419 @@
+
+# google.generativeai.types.GenerationConfig
+
+
+
+
+
+
+
+A simple dataclass used to configure the generation parameters of GenerativeModel.generate_content.
+
+
+ View aliases
+
+Main aliases
+
+`google.generativeai.GenerationConfig`
+
+
+
+
+google.generativeai.types.GenerationConfig(
+ candidate_count: (int | None) = None,
+ stop_sequences: (Iterable[str] | None) = None,
+ max_output_tokens: (int | None) = None,
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None,
+ seed: (int | None) = None,
+ response_mime_type: (str | None) = None,
+ response_schema: (protos.Schema | Mapping[str, Any] | type | None) = None,
+ presence_penalty: (float | None) = None,
+ frequency_penalty: (float | None) = None,
+ response_logprobs: (bool | None) = None,
+ logprobs: (int | None) = None
+)
+
+
+
+
+
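+A usage sketch (the model name is illustrative); the config is usually passed
+per request through `generate_content`:
+
+```
+import google.generativeai as genai
+
+model = genai.GenerativeModel('gemini-1.5-flash')
+response = model.generate_content(
+    'Write a haiku about autumn.',
+    generation_config=genai.GenerationConfig(
+        temperature=0.2,
+        max_output_tokens=64,
+    ),
+)
+```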
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`candidate_count`
+
+ |
+
+
+ Number of generated responses to return.
+
+ |
+
+
+
+`stop_sequences`
+
+ |
+
+
+ The set of character sequences (up
+to 5) that will stop output generation. If
+specified, the API will stop at the first
+appearance of a stop sequence. The stop sequence
+will not be included as part of the response.
+
+ |
+
+
+
+`max_output_tokens`
+
+ |
+
+
+ The maximum number of tokens to include in a
+candidate.
+
+If unset, this will default to output_token_limit specified
+in the model's specification.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+ Controls the randomness of the output. Note: The
+default value varies by model; see the Model.temperature
+attribute of the `Model` returned by the `genai.get_model`
+function.
+
+Values can range from [0.0,1.0], inclusive. A value closer
+to 1.0 will produce responses that are more varied and
+creative, while a value closer to 0.0 will typically result
+in more straightforward responses from the model.
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+ Optional. The maximum cumulative probability of tokens to
+consider when sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Tokens are sorted based on their assigned probabilities so
+that only the most likely tokens are considered. Top-k
+sampling directly limits the maximum number of tokens to
+consider, while nucleus sampling limits the number of tokens
+based on the cumulative probability.
+
+Note: The default value varies by model; see the
+Model.top_p attribute of the `Model` returned by the
+`genai.get_model` function.
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+`int`
+
+Optional. The maximum number of tokens to consider when
+sampling.
+
+The model uses combined Top-k and nucleus sampling.
+
+Top-k sampling considers the set of `top_k` most probable
+tokens. Defaults to 40.
+
+Note: The default value varies by model; see the
+Model.top_k attribute of the `Model` returned by the
+`genai.get_model` function.
+
+ |
+
+
+
+`seed`
+
+ |
+
+
+ Optional. Seed used in decoding. If not set, the request uses a randomly generated seed.
+
+ |
+
+
+
+`response_mime_type`
+
+ |
+
+
+ Optional. Output response mimetype of the generated candidate text.
+
+Supported mimetypes:
+ `text/plain`: (default) Text output.
+ `text/x-enum`: for use with a string-enum in `response_schema`
+ `application/json`: JSON response in the candidates.
+
+ |
+
+
+
+`response_schema`
+
+ |
+
+
+ Optional. Specifies the format of the JSON requested if response_mime_type is
+`application/json`.
+
+ |
+
+
+
+`presence_penalty`
+
+ |
+
+
+ Optional.
+
+ |
+
+
+
+`frequency_penalty`
+
+ |
+
+
+ Optional.
+
+ |
+
+
+
+`response_logprobs`
+
+ |
+
+
+ Optional. If true, export the `logprobs` results in response.
+
+ |
+
+
+
+`logprobs`
+
+ |
+
+
+ Optional. Number of candidates of log probabilities to return at each step of decoding.
+
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+candidate_count
+
+ |
+
+
+`None`
+
+ |
+
+
+
+frequency_penalty
+
+ |
+
+
+`None`
+
+ |
+
+
+
+logprobs
+
+ |
+
+
+`None`
+
+ |
+
+
+
+max_output_tokens
+
+ |
+
+
+`None`
+
+ |
+
+
+
+presence_penalty
+
+ |
+
+
+`None`
+
+ |
+
+
+
+response_logprobs
+
+ |
+
+
+`None`
+
+ |
+
+
+
+response_mime_type
+
+ |
+
+
+`None`
+
+ |
+
+
+
+response_schema
+
+ |
+
+
+`None`
+
+ |
+
+
+
+seed
+
+ |
+
+
+`None`
+
+ |
+
+
+
+stop_sequences
+
+ |
+
+
+`None`
+
+ |
+
+
+
+temperature
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_k
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_p
+
+ |
+
+
+`None`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerationConfigDict.md b/docs/api/google/generativeai/types/GenerationConfigDict.md
new file mode 100644
index 000000000..f62f4b12d
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfigDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.GenerationConfigDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/GenerationConfigType.md b/docs/api/google/generativeai/types/GenerationConfigType.md
new file mode 100644
index 000000000..a182edb70
--- /dev/null
+++ b/docs/api/google/generativeai/types/GenerationConfigType.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.GenerationConfigType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+GenerationConfigType = Union[
+ google.generativeai.protos.GenerationConfig,
+ google.generativeai.types.GenerationConfigDict,
+ google.generativeai.types.GenerationConfig
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmBlockThreshold.md b/docs/api/google/generativeai/types/HarmBlockThreshold.md
new file mode 100644
index 000000000..05992883d
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmBlockThreshold.md
@@ -0,0 +1,756 @@
+
+# google.generativeai.types.HarmBlockThreshold
+
+
+
+
+
+
+
+Block at and beyond a specified harm probability.
+
+
+ View aliases
+
+Main aliases
+
+`google.generativeai.protos.SafetySetting.HarmBlockThreshold`
+
+
+
+
+google.generativeai.types.HarmBlockThreshold(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`HARM_BLOCK_THRESHOLD_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Threshold is unspecified.
+
+ |
+
+
+
+`BLOCK_LOW_AND_ABOVE`
+
+ |
+
+
+`1`
+
+Content with NEGLIGIBLE will be allowed.
+
+ |
+
+
+
+`BLOCK_MEDIUM_AND_ABOVE`
+
+ |
+
+
+`2`
+
+Content with NEGLIGIBLE and LOW will be
+allowed.
+
+ |
+
+
+
+`BLOCK_ONLY_HIGH`
+
+ |
+
+
+`3`
+
+Content with NEGLIGIBLE, LOW, and MEDIUM will
+be allowed.
+
+ |
+
+
+
+`BLOCK_NONE`
+
+ |
+
+
+`4`
+
+All content will be allowed.
+
+ |
+
+
+
+`OFF`
+
+ |
+
+
+`5`
+
+Turn off the safety filter.
+
+ |
+
+
+
+
+
+
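+A usage sketch (the model name is illustrative): a threshold is typically
+paired with a `HarmCategory` in a request's `safety_settings`:
+
+```
+import google.generativeai as genai
+from google.generativeai.types import HarmBlockThreshold, HarmCategory
+
+model = genai.GenerativeModel('gemini-1.5-flash')
+response = model.generate_content(
+    'Hello',
+    safety_settings={
+        HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_ONLY_HIGH,
+    },
+)
+```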
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+BLOCK_LOW_AND_ABOVE
+
+ |
+
+
+``
+
+ |
+
+
+
+BLOCK_MEDIUM_AND_ABOVE
+
+ |
+
+
+``
+
+ |
+
+
+
+BLOCK_NONE
+
+ |
+
+
+``
+
+ |
+
+
+
+BLOCK_ONLY_HIGH
+
+ |
+
+
+``
+
+ |
+
+
+
+HARM_BLOCK_THRESHOLD_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+OFF
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmCategory.md b/docs/api/google/generativeai/types/HarmCategory.md
new file mode 100644
index 000000000..bea90dd90
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmCategory.md
@@ -0,0 +1,647 @@
+
+# google.generativeai.types.HarmCategory
+
+
+
+
+
+
+
+Harm categories supported by the Gemini family of models.
+
+
+google.generativeai.types.HarmCategory(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+HARM_CATEGORY_DANGEROUS_CONTENT
+
+ |
+
+
+``
+
+ |
+
+
+
+HARM_CATEGORY_HARASSMENT
+
+ |
+
+
+``
+
+ |
+
+
+
+HARM_CATEGORY_HATE_SPEECH
+
+ |
+
+
+``
+
+ |
+
+
+
+HARM_CATEGORY_SEXUALLY_EXPLICIT
+
+ |
+
+
+``
+
+ |
+
+
+
+HARM_CATEGORY_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/HarmProbability.md b/docs/api/google/generativeai/types/HarmProbability.md
new file mode 100644
index 000000000..1d7e2f6d2
--- /dev/null
+++ b/docs/api/google/generativeai/types/HarmProbability.md
@@ -0,0 +1,734 @@
+
+# google.generativeai.types.HarmProbability
+
+
+
+
+
+
+
+The probability that a piece of content is harmful.
+
+
+ View aliases
+
+Main aliases
+
+`google.generativeai.protos.SafetyRating.HarmProbability`
+
+
+
+
+google.generativeai.types.HarmProbability(
+ *args, **kwds
+)
+
+
+
+
+
+
+The classification system gives the probability of the content
+being unsafe. This does not indicate the severity of harm for a
+piece of content.
+
+
+
+
+Values |
+
+
+
+
+`HARM_PROBABILITY_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+Probability is unspecified.
+
+ |
+
+
+
+`NEGLIGIBLE`
+
+ |
+
+
+`1`
+
+Content has a negligible chance of being
+unsafe.
+
+ |
+
+
+
+`LOW`
+
+ |
+
+
+`2`
+
+Content has a low chance of being unsafe.
+
+ |
+
+
+
+`MEDIUM`
+
+ |
+
+
+`3`
+
+Content has a medium chance of being unsafe.
+
+ |
+
+
+
+`HIGH`
+
+ |
+
+
+`4`
+
+Content has a high chance of being unsafe.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+HARM_PROBABILITY_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
+HIGH
+
+ |
+
+
+``
+
+ |
+
+
+
+LOW
+
+ |
+
+
+``
+
+ |
+
+
+
+MEDIUM
+
+ |
+
+
+``
+
+ |
+
+
+
+NEGLIGIBLE
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/IncompleteIterationError.md b/docs/api/google/generativeai/types/IncompleteIterationError.md
new file mode 100644
index 000000000..ee2aa8cda
--- /dev/null
+++ b/docs/api/google/generativeai/types/IncompleteIterationError.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.IncompleteIterationError
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Model.md b/docs/api/google/generativeai/types/Model.md
new file mode 100644
index 000000000..5331cfa1d
--- /dev/null
+++ b/docs/api/google/generativeai/types/Model.md
@@ -0,0 +1,257 @@
+
+# google.generativeai.types.Model
+
+
+
+
+
+
+
+A dataclass representation of a protos.Model.
+
+
+google.generativeai.types.Model(
+ name: str,
+ base_model_id: str,
+ version: str,
+ display_name: str,
+ description: str,
+ input_token_limit: int,
+ output_token_limit: int,
+ supported_generation_methods: list[str],
+ temperature: (float | None) = None,
+ max_temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (int | None) = None
+)
+
+
+
+
+
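+A short sketch of how such a dataclass is typically obtained (the model name
+is illustrative):
+
+```
+import google.generativeai as genai
+
+model_info = genai.get_model('models/gemini-1.5-flash')
+print(model_info.input_token_limit, model_info.output_token_limit)
+```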
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+The resource name of the `Model`. Format: `models/{model}` with a `{model}` naming
+convention of: "{base_model_id}-{version}". For example: `models/chat-bison-001`.
+
+ |
+
+
+
+`base_model_id`
+
+ |
+
+
+The base name of the model. For example: `chat-bison`.
+
+ |
+
+
+
+`version`
+
+ |
+
+
+ The major version number of the model. For example: `001`.
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+The human-readable name of the model. E.g. `"Chat Bison"`. The name can be up
+to 128 characters long and can consist of any UTF-8 characters.
+
+ |
+
+
+
+`description`
+
+ |
+
+
+A short description of the model.
+
+ |
+
+
+
+`input_token_limit`
+
+ |
+
+
+Maximum number of input tokens allowed for this model.
+
+ |
+
+
+
+`output_token_limit`
+
+ |
+
+
+Maximum number of output tokens available for this model.
+
+ |
+
+
+
+`supported_generation_methods`
+
+ |
+
+
+Lists which methods are supported by the model. The method
+names are defined as camel-case strings, such as `generateMessage`, which correspond to
+API methods.
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`max_temperature`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+max_temperature
+
+ |
+
+
+`None`
+
+ |
+
+
+
+temperature
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_k
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_p
+
+ |
+
+
+`None`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/ModelsIterable.md b/docs/api/google/generativeai/types/ModelsIterable.md
new file mode 100644
index 000000000..0039b3154
--- /dev/null
+++ b/docs/api/google/generativeai/types/ModelsIterable.md
@@ -0,0 +1,19 @@
+
+# google.generativeai.types.ModelsIterable
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ModelsIterable = Iterable[
+ google.generativeai.types.Model
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/PartDict.md b/docs/api/google/generativeai/types/PartDict.md
new file mode 100644
index 000000000..d0df202ae
--- /dev/null
+++ b/docs/api/google/generativeai/types/PartDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.PartDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/PartType.md b/docs/api/google/generativeai/types/PartType.md
new file mode 100644
index 000000000..73a60027f
--- /dev/null
+++ b/docs/api/google/generativeai/types/PartType.md
@@ -0,0 +1,31 @@
+
+# google.generativeai.types.PartType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+PartType = Union[
+ google.generativeai.protos.Part,
+ google.generativeai.types.PartDict,
+ google.generativeai.protos.Blob,
+ google.generativeai.types.BlobDict,
+ PIL.Image.Image,
+ IPython.core.display.Image,
+ str,
+ google.generativeai.protos.FunctionCall,
+ google.generativeai.protos.FunctionResponse,
+ google.generativeai.types.FileDataDict,
+ google.generativeai.protos.FileData,
+ google.generativeai.protos.File,
+ google.generativeai.types.File
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Permission.md b/docs/api/google/generativeai/types/Permission.md
new file mode 100644
index 000000000..4c5bde2f4
--- /dev/null
+++ b/docs/api/google/generativeai/types/Permission.md
@@ -0,0 +1,290 @@
+
+# google.generativeai.types.Permission
+
+
+
+
+
+
+
+A permission to access a resource.
+
+
+google.generativeai.types.Permission(
+ name: str,
+ role: RoleOptions,
+ grantee_type: Optional[GranteeTypeOptions] = None,
+ email_address: Optional[str] = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`name`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`role`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`grantee_type`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`email_address`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+
+
+## Methods
+
+delete
+
+View source
+
+
+delete(
+ client: (glm.PermissionServiceClient | None) = None
+) -> None
+
+
+Delete permission (self).
+
+
+delete_async
+
+View source
+
+
+delete_async(
+ client=None
+)
+
+
+This is the async version of Permission.delete.
+
+
+get
+
+View source
+
+
+@classmethod
+get(
+ name: str, client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the permission to get.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+Requested permission as an instance of `Permission`.
+
+ |
+
+
+
+
+
+
+get_async
+
+View source
+
+
+get_async(
+ name, client=None
+)
+
+
+This is the async version of Permission.get.
+
+
+to_dict
+
+View source
+
+
+to_dict() -> dict[str, Any]
+
+
+
+
+
+update
+
+View source
+
+
+update(
+ updates: dict[str, Any],
+ client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Update a list of fields for a specified permission.
+
+
+
+
+
+Args |
+
+
+
+
+`updates`
+
+ |
+
+
+The list of fields to update.
+Currently only `role` is supported as an update path.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+`Permission` object with specified updates.
+
+ |
+
+
+
+
+
+
+update_async
+
+View source
+
+
+update_async(
+ updates, client=None
+)
+
+
+This is the async version of Permission.update.
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+email_address
+
+ |
+
+
+`None`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/Permissions.md b/docs/api/google/generativeai/types/Permissions.md
new file mode 100644
index 000000000..bb531c4e6
--- /dev/null
+++ b/docs/api/google/generativeai/types/Permissions.md
@@ -0,0 +1,432 @@
+
+# google.generativeai.types.Permissions
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.Permissions(
+ parent
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`parent`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+create
+
+View source
+
+
+create(
+ role: RoleOptions,
+ grantee_type: Optional[GranteeTypeOptions] = None,
+ email_address: Optional[str] = None,
+ client: (glm.PermissionServiceClient | None) = None
+) -> Permission
+
+
+Create a new permission on a resource (self).
+
+
+
+
+
+Args |
+
+
+
+
+`parent`
+
+ |
+
+
+The resource name of the parent resource on which the permission will be created.
+
+ |
+
+
+
+`role`
+
+ |
+
+
+The role that will be granted by the permission.
+
+ |
+
+
+
+`grantee_type`
+
+ |
+
+
+The type of the grantee for the permission.
+
+ |
+
+
+
+`email_address`
+
+ |
+
+
+The email address of the grantee.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+`Permission` object with specified parent, role, grantee type, and email address.
+
+ |
+
+
+
+
+
+
+
+
+
+Raises |
+
+
+
+
+`ValueError`
+
+ |
+
+
+When email_address is specified and grantee_type is set to EVERYONE.
+
+ |
+
+
+
+`ValueError`
+
+ |
+
+
+When email_address is not specified and grantee_type is not set to EVERYONE.
+
+ |
+
+
+
+
+
+create_async
+
+View source
+
+
+create_async(
+ role, grantee_type=None, email_address=None, client=None
+)
+
+
+This is the async version of `PermissionAdapter.create_permission`.
+
+
+get
+
+View source
+
+
+@classmethod
+get(
+ name: str
+) -> Permission
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the permission to get.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+Requested permission as an instance of `Permission`.
+
+ |
+
+
+
+
+
+
+get_async
+
+View source
+
+
+get_async(
+ name
+)
+
+
+Get information about a specific permission.
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+The name of the permission to get.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+Requested permission as an instance of `Permission`.
+
+ |
+
+
+
+
+
+
+list
+
+View source
+
+
+list(
+ page_size: Optional[int] = None,
+ client: (glm.PermissionServiceClient | None) = None
+) -> Iterable[Permission]
+
+
+List `Permission`s enforced on a resource (self).
+
+
+
+
+
+Args |
+
+
+
+
+`parent`
+
+ |
+
+
+The resource name of the parent resource in which the permission will be listed.
+
+ |
+
+
+
+`page_size`
+
+ |
+
+
+The maximum number of permissions to return (per page). The service may return fewer permissions.
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+Paginated list of `Permission` objects.
+
+ |
+
+
+
+
+
+
+list_async
+
+View source
+
+
+list_async(
+ page_size=None, client=None
+)
+
+
+This is the async version of `PermissionAdapter.list_permissions`.
+
+
+transfer_ownership
+
+View source
+
+
+transfer_ownership(
+ email_address: str, client: (glm.PermissionServiceClient | None) = None
+) -> None
+
+
+Transfer ownership of a resource (self) to a new owner.
+
+
+
+
+
+Args |
+
+
+
+
+`name`
+
+ |
+
+
+Name of the resource to transfer ownership.
+
+ |
+
+
+
+`email_address`
+
+ |
+
+
+Email address of the new owner.
+
+ |
+
+
+
+
+
+transfer_ownership_async
+
+View source
+
+
+transfer_ownership_async(
+ email_address, client=None
+)
+
+
+This is the async version of `PermissionAdapter.transfer_ownership`.
+
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/RequestOptions.md b/docs/api/google/generativeai/types/RequestOptions.md
new file mode 100644
index 000000000..bc23ea02e
--- /dev/null
+++ b/docs/api/google/generativeai/types/RequestOptions.md
@@ -0,0 +1,209 @@
+
+# google.generativeai.types.RequestOptions
+
+
+
+
+
+
+
+Request options
+
+
+google.generativeai.types.RequestOptions(
+ *,
+ retry: (google.api_core.retry.Retry | None) = None,
+ timeout: (int | float | google.api_core.timeout.TimeToDeadlineTimeout | None) = None
+)
+
+
+
+
+
+
+
+```
+>>> import google.generativeai as genai
+>>> from google.generativeai.types import RequestOptions
+>>> from google.api_core import retry
+>>>
+>>> model = genai.GenerativeModel()
+>>> response = model.generate_content('Hello',
+... request_options=RequestOptions(
+... retry=retry.Retry(initial=10, multiplier=2, maximum=60, timeout=300)))
+>>> response = model.generate_content('Hello',
+... request_options=RequestOptions(timeout=600))
+```
+
+
+
+
+Args |
+
+
+
+
+`retry`
+
+ |
+
+
+Refer to [retry docs](https://googleapis.dev/python/google-api-core/latest/retry.html) for details.
+
+ |
+
+
+
+`timeout`
+
+ |
+
+
+In seconds (or provide a [TimeToDeadlineTimeout](https://googleapis.dev/python/google-api-core/latest/timeout.html) object).
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`retry`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`timeout`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+
+
+## Methods
+
+get
+
+
+get(
+ key, default=None
+)
+
+
+D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.
+
+
+items
+
+
+items()
+
+
+D.items() -> a set-like object providing a view on D's items
+
+
+keys
+
+
+keys()
+
+
+D.keys() -> a set-like object providing a view on D's keys
+
+
+values
+
+
+values()
+
+
+D.values() -> an object providing a view on D's values
+
+
+__contains__
+
+
+__contains__(
+ key
+)
+
+
+
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ item
+)
+
+
+
+
+
+__iter__
+
+View source
+
+
+__iter__()
+
+
+
+
+
+__len__
+
+View source
+
+
+__len__()
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/RequestOptionsType.md b/docs/api/google/generativeai/types/RequestOptionsType.md
new file mode 100644
index 000000000..8466d0e31
--- /dev/null
+++ b/docs/api/google/generativeai/types/RequestOptionsType.md
@@ -0,0 +1,20 @@
+
+# google.generativeai.types.RequestOptionsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+RequestOptionsType = Union[
+ google.generativeai.types.RequestOptions,
+ google.generativeai.types.helper_types.RequestOptionsDict
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetyFeedbackDict.md b/docs/api/google/generativeai/types/SafetyFeedbackDict.md
new file mode 100644
index 000000000..9cd289074
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetyFeedbackDict.md
@@ -0,0 +1,65 @@
+
+# google.generativeai.types.SafetyFeedbackDict
+
+
+
+
+
+
+
+Safety feedback for an entire request.
+
+
+
+This field is populated if content in the input and/or response
+is blocked due to safety settings. SafetyFeedback may not exist
+for every HarmCategory. Each SafetyFeedback will return the
+safety settings used by the request as well as the lowest
+HarmProbability that should be allowed in order to return a
+result.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`rating`
+
+ |
+
+
+`google.ai.generativelanguage.SafetyRating`
+
+Safety rating evaluated from content.
+
+ |
+
+
+
+`setting`
+
+ |
+
+
+`google.ai.generativelanguage.SafetySetting`
+
+Safety settings applied to the request.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetyRatingDict.md b/docs/api/google/generativeai/types/SafetyRatingDict.md
new file mode 100644
index 000000000..69fce139a
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetyRatingDict.md
@@ -0,0 +1,79 @@
+
+# google.generativeai.types.SafetyRatingDict
+
+
+
+
+
+
+
+Safety rating for a piece of content.
+
+
+
+The safety rating contains the category of harm and the harm
+probability level in that category for a piece of content.
+Content is classified for safety across a number of harm
+categories and the probability of the harm classification is
+included here.
+
+
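+For instance (a sketch; `model` is an existing `GenerativeModel`), the
+per-candidate ratings can be inspected on a response:
+
+```
+response = model.generate_content('Hello')
+for rating in response.candidates[0].safety_ratings:
+    print(rating.category, rating.probability, rating.blocked)
+```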
+
+
+
+
+Attributes |
+
+
+
+
+`category`
+
+ |
+
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this rating.
+
+ |
+
+
+
+`probability`
+
+ |
+
+
+`google.ai.generativelanguage.SafetyRating.HarmProbability`
+
+Required. The probability of harm for this
+content.
+
+ |
+
+
+
+`blocked`
+
+ |
+
+
+`bool`
+
+Was this content blocked because of this
+rating?
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/SafetySettingDict.md b/docs/api/google/generativeai/types/SafetySettingDict.md
new file mode 100644
index 000000000..cd8ec5731
--- /dev/null
+++ b/docs/api/google/generativeai/types/SafetySettingDict.md
@@ -0,0 +1,62 @@
+
+# google.generativeai.types.SafetySettingDict
+
+
+
+
+
+
+
+Safety setting, affecting the safety-blocking behavior.
+
+
+
+Passing a safety setting for a category changes the allowed
+probability that content is blocked.
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`category`
+
+ |
+
+
+`google.ai.generativelanguage.HarmCategory`
+
+Required. The category for this setting.
+
+ |
+
+
+
+`threshold`
+
+ |
+
+
+`google.ai.generativelanguage.SafetySetting.HarmBlockThreshold`
+
+Required. Controls the probability threshold
+at which harm is blocked.
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Status.md b/docs/api/google/generativeai/types/Status.md
new file mode 100644
index 000000000..f86d6e31c
--- /dev/null
+++ b/docs/api/google/generativeai/types/Status.md
@@ -0,0 +1,61 @@
+
+# google.generativeai.types.Status
+
+
+
+
+
+
+
+A ProtocolMessage
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`code`
+
+ |
+
+
+`int32 code`
+
+ |
+
+
+
+`details`
+
+ |
+
+
+`repeated Any details`
+
+ |
+
+
+
+`message`
+
+ |
+
+
+`string message`
+
+ |
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/StopCandidateException.md b/docs/api/google/generativeai/types/StopCandidateException.md
new file mode 100644
index 000000000..c9e71b357
--- /dev/null
+++ b/docs/api/google/generativeai/types/StopCandidateException.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.StopCandidateException
+
+
+
+
+
+
+
+Common base class for all non-exit exceptions.
+
+
+
+
diff --git a/docs/api/google/generativeai/types/StrictContentType.md b/docs/api/google/generativeai/types/StrictContentType.md
new file mode 100644
index 000000000..008b7b257
--- /dev/null
+++ b/docs/api/google/generativeai/types/StrictContentType.md
@@ -0,0 +1,20 @@
+
+# google.generativeai.types.StrictContentType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+StrictContentType = Union[
+ google.generativeai.protos.Content,
+ google.generativeai.types.ContentDict
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/Tool.md b/docs/api/google/generativeai/types/Tool.md
new file mode 100644
index 000000000..68c3dfb50
--- /dev/null
+++ b/docs/api/google/generativeai/types/Tool.md
@@ -0,0 +1,118 @@
+
+# google.generativeai.types.Tool
+
+
+
+
+
+
+
+A wrapper for protos.Tool. Contains a collection of related `FunctionDeclaration` objects, a protos.CodeExecution object, and a protos.GoogleSearchRetrieval object.
+
+
+google.generativeai.types.Tool(
+ *,
+ function_declarations: (Iterable[FunctionDeclarationType] | None) = None,
+ google_search_retrieval: (GoogleSearchRetrievalType | None) = None,
+ code_execution: (protos.CodeExecution | None) = None
+)
+
+
+
+
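+A construction sketch (`get_time` is a made-up example); per the signature
+above, `function_declarations` accepts plain callables, which are wrapped into
+`FunctionDeclaration` objects:
+
+```
+import google.generativeai as genai
+
+def get_time(timezone: str) -> str:
+    """Returns the current time in the given timezone."""
+    ...
+
+tool = genai.types.Tool(function_declarations=[get_time])
+```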
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`code_execution`
+
+ |
+
+
+
+
+ |
+
+
+
+`function_declarations`
+
+ |
+
+
+
+
+ |
+
+
+
+`google_search_retrieval`
+
+ |
+
+
+
+
+ |
+
+
+
+
+
+## Methods
+
+to_proto
+
+View source
+
+
+to_proto()
+
+
+
+
+
+__call__
+
+View source
+
+
+__call__(
+ fc: protos.FunctionCall
+) -> (protos.FunctionResponse | None)
+
+
+Call self as a function.
+
+
+__getitem__
+
+View source
+
+
+__getitem__(
+ name: (str | protos.FunctionCall)
+) -> (FunctionDeclaration | protos.FunctionDeclaration)
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ToolDict.md b/docs/api/google/generativeai/types/ToolDict.md
new file mode 100644
index 000000000..7c58c9de2
--- /dev/null
+++ b/docs/api/google/generativeai/types/ToolDict.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.ToolDict
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/ToolsType.md b/docs/api/google/generativeai/types/ToolsType.md
new file mode 100644
index 000000000..9cf0a768f
--- /dev/null
+++ b/docs/api/google/generativeai/types/ToolsType.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.types.ToolsType
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+ToolsType = Union[
+ Iterable[Union[str, google.generativeai.types.Tool, google.generativeai.protos.Tool, google.generativeai.types.ToolDict, Iterable[google.generativeai.types.FunctionDeclarationType], google.generativeai.types.FunctionDeclaration, google.generativeai.protos.FunctionDeclaration, dict[str, Any], Callable[..., Any]]],
+ str,
+ google.generativeai.types.Tool,
+ google.generativeai.protos.Tool,
+ google.generativeai.types.ToolDict,
+ Iterable[google.generativeai.types.FunctionDeclarationType],
+ google.generativeai.types.FunctionDeclaration,
+ google.generativeai.protos.FunctionDeclaration,
+ dict[str, Any],
+ Callable[..., Any]
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModel.md b/docs/api/google/generativeai/types/TunedModel.md
new file mode 100644
index 000000000..6316be660
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModel.md
@@ -0,0 +1,375 @@
+
+# google.generativeai.types.TunedModel
+
+
+
+
+
+
+
+A dataclass representation of a protos.TunedModel.
+
+
+google.generativeai.types.TunedModel(
+ name: (str | None) = None,
+ source_model: (str | None) = None,
+ base_model: (str | None) = None,
+ display_name: str = '',
+ description: str = '',
+ temperature: (float | None) = None,
+ top_p: (float | None) = None,
+ top_k: (float | None) = None,
+ state: TunedModelState = TunedModelState.STATE_UNSPECIFIED,
+ create_time: (datetime.datetime | None) = None,
+ update_time: (datetime.datetime | None) = None,
+ tuning_task: (TuningTask | None) = None,
+ reader_project_numbers: (list[int] | None) = None
+)
+
+
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`permissions`
+
+ |
+
+
+
+
+ |
+
+
+
+`name`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`source_model`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`base_model`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`description`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`temperature`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`top_p`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`top_k`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`state`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`create_time`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`update_time`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`tuning_task`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+`reader_project_numbers`
+
+ |
+
+
+Dataclass field
+
+ |
+
+
+
+
+
+## Methods
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+base_model
+
+ |
+
+
+`None`
+
+ |
+
+
+
+create_time
+
+ |
+
+
+`None`
+
+ |
+
+
+
+description
+
+ |
+
+
+`''`
+
+ |
+
+
+
+display_name
+
+ |
+
+
+`''`
+
+ |
+
+
+
+name
+
+ |
+
+
+`None`
+
+ |
+
+
+
+reader_project_numbers
+
+ |
+
+
+`None`
+
+ |
+
+
+
+source_model
+
+ |
+
+
+`None`
+
+ |
+
+
+
+state
+
+ |
+
+
+``
+
+ |
+
+
+
+temperature
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_k
+
+ |
+
+
+`None`
+
+ |
+
+
+
+top_p
+
+ |
+
+
+`None`
+
+ |
+
+
+
+tuning_task
+
+ |
+
+
+`None`
+
+ |
+
+
+
+update_time
+
+ |
+
+
+`None`
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModelNameOptions.md b/docs/api/google/generativeai/types/TunedModelNameOptions.md
new file mode 100644
index 000000000..0ab3d7879
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModelNameOptions.md
@@ -0,0 +1,21 @@
+
+# google.generativeai.types.TunedModelNameOptions
+
+
+This symbol is a **type alias**.
+
+
+
+#### Source:
+
+
+TunedModelNameOptions = Union[
+ str,
+ google.generativeai.types.TunedModel,
+ google.generativeai.protos.TunedModel
+]
+
+
+
+
+
diff --git a/docs/api/google/generativeai/types/TunedModelState.md b/docs/api/google/generativeai/types/TunedModelState.md
new file mode 100644
index 000000000..bf55c2738
--- /dev/null
+++ b/docs/api/google/generativeai/types/TunedModelState.md
@@ -0,0 +1,706 @@
+
+# google.generativeai.types.TunedModelState
+
+
+
+
+
+
+
+The state of the tuned model.
+
+
+ View aliases
+
+Main aliases
+
+`google.generativeai.protos.TunedModel.State`
+
+
+
+
+google.generativeai.types.TunedModelState(
+ *args, **kwds
+)
+
+
+
+
+
+
+
+
+
+
+Values |
+
+
+
+
+`STATE_UNSPECIFIED`
+
+ |
+
+
+`0`
+
+The default value. This value is unused.
+
+ |
+
+
+
+`CREATING`
+
+ |
+
+
+`1`
+
+The model is being created.
+
+ |
+
+
+
+`ACTIVE`
+
+ |
+
+
+`2`
+
+The model is ready to be used.
+
+ |
+
+
+
+`FAILED`
+
+ |
+
+
+`3`
+
+The model failed to be created.
+
+ |
+
+
+
+
+
+
+
+
+
+
+Attributes |
+
+
+
+
+`denominator`
+
+ |
+
+
+the denominator of a rational number in lowest terms
+
+ |
+
+
+
+`imag`
+
+ |
+
+
+the imaginary part of a complex number
+
+ |
+
+
+
+`numerator`
+
+ |
+
+
+the numerator of a rational number in lowest terms
+
+ |
+
+
+
+`real`
+
+ |
+
+
+the real part of a complex number
+
+ |
+
+
+
+
+
+## Methods
+
+as_integer_ratio
+
+
+as_integer_ratio()
+
+
+Return a pair of integers, whose ratio is equal to the original int.
+
+The ratio is in lowest terms and has a positive denominator.
+
+```
+>>> (10).as_integer_ratio()
+(10, 1)
+>>> (-10).as_integer_ratio()
+(-10, 1)
+>>> (0).as_integer_ratio()
+(0, 1)
+```
+
+bit_count
+
+
+bit_count()
+
+
+Number of ones in the binary representation of the absolute value of self.
+
+Also known as the population count.
+
+```
+>>> bin(13)
+'0b1101'
+>>> (13).bit_count()
+3
+```
+
+bit_length
+
+
+bit_length()
+
+
+Number of bits necessary to represent self in binary.
+
+```
+>>> bin(37)
+'0b100101'
+>>> (37).bit_length()
+6
+```
+
+conjugate
+
+
+conjugate()
+
+
+Returns self, the complex conjugate of any int.
+
+
+from_bytes
+
+
+from_bytes(
+ byteorder='big', *, signed=False
+)
+
+
+Return the integer represented by the given array of bytes.
+
+bytes
+ Holds the array of bytes to convert. The argument must either
+ support the buffer protocol or be an iterable object producing bytes.
+ Bytes and bytearray are examples of built-in objects that support the
+ buffer protocol.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Indicates whether two's complement is used to represent the integer.
+
+is_integer
+
+
+is_integer()
+
+
+Returns True. Exists for duck type compatibility with float.is_integer.
+
+
+to_bytes
+
+
+to_bytes(
+ length=1, byteorder='big', *, signed=False
+)
+
+
+Return an array of bytes representing an integer.
+
+length
+ Length of bytes object to use. An OverflowError is raised if the
+ integer is not representable with the given number of bytes. Default
+ is length 1.
+byteorder
+ The byte order used to represent the integer. If byteorder is 'big',
+ the most significant byte is at the beginning of the byte array. If
+ byteorder is 'little', the most significant byte is at the end of the
+ byte array. To request the native byte order of the host system, use
+ `sys.byteorder' as the byte order value. Default is to use 'big'.
+signed
+ Determines whether two's complement is used to represent the integer.
+ If signed is False and a negative integer is given, an OverflowError
+ is raised.
+
+__abs__
+
+
+__abs__()
+
+
+abs(self)
+
+
+__add__
+
+
+__add__(
+ value, /
+)
+
+
+Return self+value.
+
+
+__and__
+
+
+__and__(
+ value, /
+)
+
+
+Return self&value.
+
+
+__bool__
+
+
+__bool__()
+
+
+True if self else False
+
+
+__eq__
+
+
+__eq__(
+ other
+)
+
+
+Return self==value.
+
+
+__floordiv__
+
+
+__floordiv__(
+ value, /
+)
+
+
+Return self//value.
+
+
+__ge__
+
+
+__ge__(
+ other
+)
+
+
+Return self>=value.
+
+
+__gt__
+
+
+__gt__(
+ other
+)
+
+
+Return self>value.
+
+
+__invert__
+
+
+__invert__()
+
+
+~self
+
+
+__le__
+
+
+__le__(
+ other
+)
+
+
+Return self<=value.
+
+
+__lshift__
+
+
+__lshift__(
+ value, /
+)
+
+
+Return self<<value.
+
+
+__lt__
+
+
+__lt__(
+ other
+)
+
+
+Return self<value.
+
+
+__mod__
+
+
+__mod__(
+ value, /
+)
+
+
+Return self%value.
+
+
+__mul__
+
+
+__mul__(
+ value, /
+)
+
+
+Return self*value.
+
+
+__ne__
+
+
+__ne__(
+ other
+)
+
+
+Return self!=value.
+
+
+__neg__
+
+
+__neg__()
+
+
+-self
+
+
+__or__
+
+
+__or__(
+ value, /
+)
+
+
+Return self|value.
+
+
+__pos__
+
+
+__pos__()
+
+
++self
+
+
+__pow__
+
+
+__pow__(
+ value, mod, /
+)
+
+
+Return pow(self, value, mod).
+
+
+__radd__
+
+
+__radd__(
+ value, /
+)
+
+
+Return value+self.
+
+
+__rand__
+
+
+__rand__(
+ value, /
+)
+
+
+Return value&self.
+
+
+__rfloordiv__
+
+
+__rfloordiv__(
+ value, /
+)
+
+
+Return value//self.
+
+
+__rlshift__
+
+
+__rlshift__(
+ value, /
+)
+
+
+Return value<<self.
+
+
+__rmod__
+
+
+__rmod__(
+ value, /
+)
+
+
+Return value%self.
+
+
+__rmul__
+
+
+__rmul__(
+ value, /
+)
+
+
+Return value*self.
+
+
+__ror__
+
+
+__ror__(
+ value, /
+)
+
+
+Return value|self.
+
+
+__rpow__
+
+
+__rpow__(
+ value, mod, /
+)
+
+
+Return pow(value, self, mod).
+
+
+__rrshift__
+
+
+__rrshift__(
+ value, /
+)
+
+
+Return value>>self.
+
+
+__rshift__
+
+
+__rshift__(
+ value, /
+)
+
+
+Return self>>value.
+
+
+__rsub__
+
+
+__rsub__(
+ value, /
+)
+
+
+Return value-self.
+
+
+__rtruediv__
+
+
+__rtruediv__(
+ value, /
+)
+
+
+Return value/self.
+
+
+__rxor__
+
+
+__rxor__(
+ value, /
+)
+
+
+Return value^self.
+
+
+__sub__
+
+
+__sub__(
+ value, /
+)
+
+
+Return self-value.
+
+
+__truediv__
+
+
+__truediv__(
+ value, /
+)
+
+
+Return self/value.
+
+
+__xor__
+
+
+__xor__(
+ value, /
+)
+
+
+Return self^value.
+
+
+
+
+
+
+
+
+
+Class Variables |
+
+
+
+
+ACTIVE
+
+ |
+
+
+``
+
+ |
+
+
+
+CREATING
+
+ |
+
+
+``
+
+ |
+
+
+
+FAILED
+
+ |
+
+
+``
+
+ |
+
+
+
+STATE_UNSPECIFIED
+
+ |
+
+
+``
+
+ |
+
+
+
diff --git a/docs/api/google/generativeai/types/TypedDict.md b/docs/api/google/generativeai/types/TypedDict.md
new file mode 100644
index 000000000..4e7aeb78d
--- /dev/null
+++ b/docs/api/google/generativeai/types/TypedDict.md
@@ -0,0 +1,67 @@
+
+# google.generativeai.types.TypedDict
+
+
+
+
+
+
+
+A simple typed namespace. At runtime it is equivalent to a plain dict.
+
+
+
+google.generativeai.types.TypedDict(
+ typename, fields, /, *, total=True, closed=False, **kwargs
+)
+
+
+
+
+
+
+TypedDict creates a dictionary type such that a type checker will expect all
+instances to have a certain set of keys, where each key is
+associated with a value of a consistent type. This expectation
+is not checked at runtime.
+
+Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+The type info can be accessed via the Point2D.__annotations__ dict, and
+the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+TypedDict supports an additional equivalent form::
+
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+By default, all keys must be present in a TypedDict. It is possible
+to override this by specifying totality::
+
+ class Point2D(TypedDict, total=False):
+ x: int
+ y: int
+
+This means that a Point2D TypedDict can have any of the keys omitted. A type
+checker is only expected to support a literal False or True as the value of
+the total argument. True is the default, and makes all items defined in the
+class body be required.
+
+The Required and NotRequired special forms can also be used to mark
+individual keys as being required or not required::
+
+ class Point2D(TypedDict):
+ x: int # the "x" key must always be present (Required is the default)
+ y: NotRequired[int] # the "y" key can be omitted
+
+See PEP 655 for more details on Required and NotRequired.
\ No newline at end of file
diff --git a/docs/api/google/generativeai/types/get_default_file_client.md b/docs/api/google/generativeai/types/get_default_file_client.md
new file mode 100644
index 000000000..9d575aa98
--- /dev/null
+++ b/docs/api/google/generativeai/types/get_default_file_client.md
@@ -0,0 +1,26 @@
+
+# google.generativeai.types.get_default_file_client
+
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.get_default_file_client() -> glm.FilesServiceClient
+
+
+
+
+
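+Returns the default, shared client used by the file helpers (for example,
+`upload_file`). A minimal sketch, assuming `configure` has already been
+called with an API key:
+
+    from google.generativeai.types import get_default_file_client
+
+    client = get_default_file_client()
+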
diff --git a/docs/api/google/generativeai/types/to_file_data.md b/docs/api/google/generativeai/types/to_file_data.md
new file mode 100644
index 000000000..2b0f5a819
--- /dev/null
+++ b/docs/api/google/generativeai/types/to_file_data.md
@@ -0,0 +1,28 @@
+
+# google.generativeai.types.to_file_data
+
+
+
+
+
+
+
+
+
+
+
+google.generativeai.types.to_file_data(
+ file_data: FileDataType
+)
+
+
+
+
+
diff --git a/docs/api/google/generativeai/update_tuned_model.md b/docs/api/google/generativeai/update_tuned_model.md
new file mode 100644
index 000000000..469b3f6e6
--- /dev/null
+++ b/docs/api/google/generativeai/update_tuned_model.md
@@ -0,0 +1,32 @@
+
+# google.generativeai.update_tuned_model
+
+
+
+
+
+
+
+Calls the API to push updates to a specified tuned model; only certain attributes are updatable.
+
+
+
+google.generativeai.update_tuned_model(
+ tuned_model: (str | protos.TunedModel),
+ updates: (dict[str, Any] | None) = None,
+ *,
+ client: (glm.ModelServiceClient | None) = None,
+ request_options: (helper_types.RequestOptionsType | None) = None
+) -> model_types.TunedModel
+
+
+
+
+
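+A minimal usage sketch (the model name and the updated field are
+illustrative; the API accepts updates only for certain attributes):
+
+    import google.generativeai as genai
+
+    model = genai.update_tuned_model(
+        'tunedModels/my-model',
+        {'description': 'An updated description.'},
+    )
+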
diff --git a/docs/api/google/generativeai/upload_file.md b/docs/api/google/generativeai/upload_file.md
new file mode 100644
index 000000000..3dda9fab8
--- /dev/null
+++ b/docs/api/google/generativeai/upload_file.md
@@ -0,0 +1,123 @@
+
+# google.generativeai.upload_file
+
+
+
+
+
+
+
+Calls the API to upload a file using a supported file service.
+
+
+
+google.generativeai.upload_file(
+ path: (str | pathlib.Path | os.PathLike | IOBase),
+ *,
+ mime_type: (str | None) = None,
+ name: (str | None) = None,
+ display_name: (str | None) = None,
+ resumable: bool = True
+) -> file_types.File
+
+
+
+
+
+
+
+
+
+
+Args |
+
+
+
+
+`path`
+
+ |
+
+
+The path to the file or a file-like object (e.g., BytesIO) to be uploaded.
+
+ |
+
+
+
+`mime_type`
+
+ |
+
+
+The MIME type of the file. If not provided, it will be
+inferred from the file extension.
+
+ |
+
+
+
+`name`
+
+ |
+
+
+The name of the file in the destination (e.g., 'files/sample-image').
+If not provided, a system-generated ID will be created.
+
+ |
+
+
+
+`display_name`
+
+ |
+
+
+Optional display name of the file.
+
+ |
+
+
+
+`resumable`
+
+ |
+
+
+Whether to use the resumable upload protocol. By default, this is enabled.
+See details at
+https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.MediaFileUpload-class.html#resumable
+
+ |
+
+
+
+
+
+
+
+
+Returns |
+
+
+
+
+`file_types.File`
+
+ |
+
+
+The `File` object describing the uploaded file.
+
+ |
+
+
+
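+For example, a minimal usage sketch (the file names are illustrative, and
+`configure` is assumed to have been called with an API key):
+
+    import io
+    import google.generativeai as genai
+
+    # Path input: the MIME type is inferred from the extension.
+    sample = genai.upload_file(path='sample.jpg')
+
+    # File-like input: `mime_type` must be set explicitly.
+    with open('sample.jpg', 'rb') as f:
+        sample = genai.upload_file(path=io.BytesIO(f.read()), mime_type='image/jpeg')
+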
diff --git a/docs/build_docs.py b/docs/build_docs.py
index 012cd3441..1f6743299 100644
--- a/docs/build_docs.py
+++ b/docs/build_docs.py
@@ -22,13 +22,11 @@
$> python build_docs.py
"""
-import os
import pathlib
import re
import textwrap
import typing
-
from absl import app
from absl import flags
@@ -46,8 +44,11 @@
typing.TYPE_CHECKING = True
from google import generativeai as genai
+from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import generate_lib
from tensorflow_docs.api_generator import public_api
+from tensorflow_docs.api_generator import parser
+from tensorflow_docs.api_generator.pretty_docs import base_page
import yaml
@@ -74,6 +75,37 @@
"where to find the project code",
)
+parser.ITEMS_TEMPLATE = textwrap.dedent(
+ """\
+
+
+
+ {name}{anchor}
+
+ |
+
+
+ {description}
+
+ |
+
"""
+)
+
+parser.TEXT_TEMPLATE = textwrap.dedent(
+ """\
+
+
+
+ {text}
+
+ |
+
"""
+)
+
+base_page.TABLE_HEADER = ''
+
+base_page.TemplatePageBuilder.get_devsite_headers = lambda x: ""
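+
+# Note: the overrides above swap tensorflow_docs' default item/text templates
+# and page headers so the generated Markdown pages stay minimal.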
+
def gen_api_docs():
"""Generates api docs for the generative-ai package."""
diff --git a/google/generativeai/__init__.py b/google/generativeai/__init__.py
index 4fe362689..5b143d768 100644
--- a/google/generativeai/__init__.py
+++ b/google/generativeai/__init__.py
@@ -42,14 +42,11 @@
from google.generativeai import version
+from google.generativeai import caching
from google.generativeai import protos
from google.generativeai import types
-from google.generativeai.types import GenerationConfig
-
-from google.generativeai.discuss import chat
-from google.generativeai.discuss import chat_async
-from google.generativeai.discuss import count_message_tokens
+from google.generativeai.client import configure
from google.generativeai.embedding import embed_content
from google.generativeai.embedding import embed_content_async
@@ -62,10 +59,6 @@
from google.generativeai.generative_models import GenerativeModel
from google.generativeai.generative_models import ChatSession
-from google.generativeai.text import generate_text
-from google.generativeai.text import generate_embeddings
-from google.generativeai.text import count_text_tokens
-
from google.generativeai.models import list_models
from google.generativeai.models import list_tuned_models
@@ -80,16 +73,13 @@
from google.generativeai.operations import list_operations
from google.generativeai.operations import get_operation
-
-from google.generativeai.client import configure
+from google.generativeai.types import GenerationConfig
__version__ = version.__version__
-del discuss
del embedding
del files
del generative_models
-del text
del models
del client
del operations
diff --git a/google/generativeai/answer.py b/google/generativeai/answer.py
index 4dd93feaf..83bf5f679 100644
--- a/google/generativeai/answer.py
+++ b/google/generativeai/answer.py
@@ -283,7 +283,7 @@ def generate_answer(
answer_style: Style in which the grounded answer should be returned.
safety_settings: Safety settings for generated output. Defaults to None.
temperature: Controls the randomness of the output.
- client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+ client: If you're not relying on a default client, you pass a `glm.GenerativeServiceClient` instead.
request_options: Options for the request.
Returns:
@@ -337,7 +337,7 @@ async def generate_answer_async(
answer_style: Style in which the grounded answer should be returned.
safety_settings: Safety settings for generated output. Defaults to None.
temperature: Controls the randomness of the output.
- client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
+ client: If you're not relying on a default client, you pass a `glm.GenerativeServiceClient` instead.
Returns:
A `types.Answer` containing the model's text answer response.
diff --git a/google/generativeai/client.py b/google/generativeai/client.py
index 5d7b6996b..113592594 100644
--- a/google/generativeai/client.py
+++ b/google/generativeai/client.py
@@ -2,12 +2,14 @@
import os
import contextlib
+import inspect
import dataclasses
import pathlib
-import types
+import threading
from typing import Any, cast
from collections.abc import Sequence
import httplib2
+from io import IOBase
import google.ai.generativelanguage as glm
import google.generativeai.protos as protos
@@ -30,6 +32,21 @@
__version__ = "0.0.0"
USER_AGENT = "genai-py"
+
+#### Caution! ####
+# - It would make sense for the discovery URL to respect the client_options.endpoint setting.
+# - That would make testing Files on the staging server possible.
+# - We tried fixing this once, but broke colab in the process because their endpoint didn't forward the discovery
+# requests. https://github.com/google-gemini/generative-ai-python/pull/333
+# - Kaggle would have a similar problem (b/362278209).
+# - I think their proxy would forward the discovery traffic.
+# - But they don't need to intercept the files-service at all, and uploads of large files could overload them.
+# - Do the scotty uploads go to the same domain?
+# - If you do route the discovery call to kaggle, be sure to attach the default_metadata (they need it).
+# - One solution to all this would be if configure could take overrides per service.
+# - set client_options.endpoint, but use a different endpoint for file service? It's not clear how best to do that
+# through the file service.
+##################
GENAI_API_DISCOVERY_URL = "https://generativelanguage.googleapis.com/$discovery/rest"
@@ -48,9 +65,10 @@ def patch_colab_gce_credentials():
class FileServiceClient(glm.FileServiceClient):
def __init__(self, *args, **kwargs):
self._discovery_api = None
+ self._local = threading.local()
super().__init__(*args, **kwargs)
- def _setup_discovery_api(self):
+ def _setup_discovery_api(self, metadata: dict | Sequence[tuple[str, str]] = ()):
api_key = self._client_options.api_key
if api_key is None:
raise ValueError(
@@ -61,26 +79,28 @@ def _setup_discovery_api(self):
http=httplib2.Http(),
postproc=lambda resp, content: (resp, content),
uri=f"{GENAI_API_DISCOVERY_URL}?version=v1beta&key={api_key}",
+ headers=dict(metadata),
)
response, content = request.execute()
request.http.close()
discovery_doc = content.decode("utf-8")
- self._discovery_api = googleapiclient.discovery.build_from_document(
+ self._local.discovery_api = googleapiclient.discovery.build_from_document(
discovery_doc, developerKey=api_key
)
def create_file(
self,
- path: str | pathlib.Path | os.PathLike,
+ path: str | pathlib.Path | os.PathLike | IOBase,
*,
mime_type: str | None = None,
name: str | None = None,
display_name: str | None = None,
resumable: bool = True,
+ metadata: Sequence[tuple[str, str]] = (),
) -> protos.File:
if self._discovery_api is None:
- self._setup_discovery_api()
+ self._setup_discovery_api(metadata)
file = {}
if name is not None:
@@ -88,10 +108,18 @@ def create_file(
if display_name is not None:
file["displayName"] = display_name
- media = googleapiclient.http.MediaFileUpload(
- filename=path, mimetype=mime_type, resumable=resumable
- )
- request = self._discovery_api.media().upload(body={"file": file}, media_body=media)
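+        # File-like objects are streamed with MediaIoBaseUpload; filesystem
+        # paths continue to use MediaFileUpload.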
+ if isinstance(path, IOBase):
+ media = googleapiclient.http.MediaIoBaseUpload(
+ fd=path, mimetype=mime_type, resumable=resumable
+ )
+ else:
+ media = googleapiclient.http.MediaFileUpload(
+ filename=path, mimetype=mime_type, resumable=resumable
+ )
+
+ request = self._local.discovery_api.media().upload(body={"file": file}, media_body=media)
+ for key, value in metadata:
+ request.headers[key] = value
result = request.execute()
return self.get_file({"name": result["file"]["name"]})
@@ -108,9 +136,6 @@ async def create_file(self, *args, **kwargs):
class _ClientManager:
client_config: dict[str, Any] = dataclasses.field(default_factory=dict)
default_metadata: Sequence[tuple[str, str]] = ()
-
- discuss_client: glm.DiscussServiceClient | None = None
- discuss_async_client: glm.DiscussServiceAsyncClient | None = None
clients: dict[str, Any] = dataclasses.field(default_factory=dict)
def configure(
@@ -119,7 +144,7 @@ def configure(
api_key: str | None = None,
credentials: ga_credentials.Credentials | dict | None = None,
# The user can pass a string to choose `rest` or `grpc` or 'grpc_asyncio'.
- # See `_transport_registry` in `DiscussServiceClientMeta`.
+ # See _transport_registry in the google.ai.generativelanguage package.
# Since the transport classes align with the client classes it wouldn't make
# sense to accept a `Transport` object here even though the client classes can.
# We could accept a dict since all the `Transport` classes take the same args,
@@ -132,7 +157,8 @@ def configure(
"""Initializes default client configurations using specified parameters or environment variables.
If no API key has been provided (either directly, or on `client_options`) and the
- `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
+    `GEMINI_API_KEY` environment variable is set, it will be used as the API key. If not,
+    and the `GOOGLE_API_KEY` environment variable is set, it will be used as the API key.
Note: Not all arguments are detailed below. Refer to the `*ServiceClient` classes in
`google.ai.generativelanguage` for details on the other arguments.
@@ -141,8 +167,8 @@ def configure(
transport: A string, one of: [`rest`, `grpc`, `grpc_asyncio`].
api_key: The API-Key to use when creating the default clients (each service uses
a separate client). This is a shortcut for `client_options={"api_key": api_key}`.
- If omitted, and the `GOOGLE_API_KEY` environment variable is set, it will be
- used.
+        If omitted, the `GEMINI_API_KEY` and then the `GOOGLE_API_KEY` environment
+        variables are checked, and the first one set will be used.
default_metadata: Default (key, value) metadata pairs to send with every request.
when using `transport="rest"` these are sent as HTTP headers.
"""
@@ -159,9 +185,14 @@ def configure(
"Invalid configuration: Please set either `api_key` or `client_options['api_key']`, but not both."
)
else:
- if api_key is None:
+ if not api_key:
# If no key is provided explicitly, attempt to load one from the
# environment.
+ api_key = os.getenv("GEMINI_API_KEY")
+
+ if not api_key:
+ # If the GEMINI_API_KEY doesn't exist, attempt to load the
+ # GOOGLE_API_KEY from the environment.
api_key = os.getenv("GOOGLE_API_KEY")
client_options.api_key = api_key
@@ -223,16 +254,14 @@ def make_client(self, name):
def keep(name, f):
if name.startswith("_"):
return False
- elif name == "create_file":
- return False
- elif not isinstance(f, types.FunctionType):
- return False
- elif isinstance(f, classmethod):
+
+ if not callable(f):
return False
- elif isinstance(f, staticmethod):
+
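+        # Only wrap client methods that accept a `metadata` argument, since
+        # the wrapper exists to attach the default metadata to each call.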
+ if "metadata" not in inspect.signature(f).parameters.keys():
return False
- else:
- return True
+
+ return True
def add_default_metadata_wrapper(f):
def call(*args, metadata=(), **kwargs):
@@ -241,7 +270,7 @@ def call(*args, metadata=(), **kwargs):
return call
- for name, value in cls.__dict__.items():
+ for name, value in inspect.getmembers(cls):
if not keep(name, value):
continue
f = getattr(client, name)
@@ -275,7 +304,6 @@ def configure(
api_key: str | None = None,
credentials: ga_credentials.Credentials | dict | None = None,
# The user can pass a string to choose `rest` or `grpc` or 'grpc_asyncio'.
- # See `_transport_registry` in `DiscussServiceClientMeta`.
# Since the transport classes align with the client classes it wouldn't make
# sense to accept a `Transport` object here even though the client classes can.
# We could accept a dict since all the `Transport` classes take the same args,
@@ -320,14 +348,6 @@ def get_default_cache_client() -> glm.CacheServiceClient:
return _client_manager.get_default_client("cache")
-def get_default_discuss_client() -> glm.DiscussServiceClient:
- return _client_manager.get_default_client("discuss")
-
-
-def get_default_discuss_async_client() -> glm.DiscussServiceAsyncClient:
- return _client_manager.get_default_client("discuss_async")
-
-
def get_default_file_client() -> glm.FilesServiceClient:
return _client_manager.get_default_client("file")
@@ -344,10 +364,6 @@ def get_default_generative_async_client() -> glm.GenerativeServiceAsyncClient:
return _client_manager.get_default_client("generative_async")
-def get_default_text_client() -> glm.TextServiceClient:
- return _client_manager.get_default_client("text")
-
-
def get_default_operations_client() -> operations_v1.OperationsClient:
return _client_manager.get_default_client("operations")
diff --git a/google/generativeai/discuss.py b/google/generativeai/discuss.py
deleted file mode 100644
index 448347b41..000000000
--- a/google/generativeai/discuss.py
+++ /dev/null
@@ -1,599 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from __future__ import annotations
-
-import dataclasses
-import sys
-import textwrap
-
-from typing import Iterable, List
-
-import google.ai.generativelanguage as glm
-
-from google.generativeai.client import get_default_discuss_client
-from google.generativeai.client import get_default_discuss_async_client
-from google.generativeai import string_utils
-from google.generativeai import protos
-from google.generativeai.types import discuss_types
-from google.generativeai.types import helper_types
-from google.generativeai.types import model_types
-from google.generativeai.types import palm_safety_types
-
-
-def _make_message(content: discuss_types.MessageOptions) -> protos.Message:
- """Creates a `protos.Message` object from the provided content."""
- if isinstance(content, protos.Message):
- return content
- if isinstance(content, str):
- return protos.Message(content=content)
- else:
- return protos.Message(content)
-
-
-def _make_messages(
- messages: discuss_types.MessagesOptions,
-) -> List[protos.Message]:
- """
- Creates a list of `protos.Message` objects from the provided messages.
-
- This function takes a variety of message content inputs, such as strings, dictionaries,
- or `protos.Message` objects, and creates a list of `protos.Message` objects. It ensures that
- the authors of the messages alternate appropriately. If authors are not provided,
- default authors are assigned based on their position in the list.
-
- Args:
- messages: The messages to convert.
-
- Returns:
- A list of `protos.Message` objects with alternating authors.
- """
- if isinstance(messages, (str, dict, protos.Message)):
- messages = [_make_message(messages)]
- else:
- messages = [_make_message(message) for message in messages]
-
- even_authors = set(msg.author for msg in messages[::2] if msg.author)
- if not even_authors:
- even_author = "0"
- elif len(even_authors) == 1:
- even_author = even_authors.pop()
- else:
- raise discuss_types.AuthorError(
- "Invalid sequence: Authors in the discussion must alternate strictly."
- )
-
- odd_authors = set(msg.author for msg in messages[1::2] if msg.author)
- if not odd_authors:
- odd_author = "1"
- elif len(odd_authors) == 1:
- odd_author = odd_authors.pop()
- else:
- raise discuss_types.AuthorError(
- "Invalid sequence: Authors in the discussion must alternate strictly."
- )
-
- if all(msg.author for msg in messages):
- return messages
-
- authors = [even_author, odd_author]
- for i, msg in enumerate(messages):
- msg.author = authors[i % 2]
-
- return messages
-
-
-def _make_example(item: discuss_types.ExampleOptions) -> protos.Example:
- """Creates a `protos.Example` object from the provided item."""
- if isinstance(item, protos.Example):
- return item
-
- if isinstance(item, dict):
- item = item.copy()
- item["input"] = _make_message(item["input"])
- item["output"] = _make_message(item["output"])
- return protos.Example(item)
-
- if isinstance(item, Iterable):
- input, output = list(item)
- return protos.Example(input=_make_message(input), output=_make_message(output))
-
- # try anyway
- return protos.Example(item)
-
-
-def _make_examples_from_flat(
- examples: List[discuss_types.MessageOptions],
-) -> List[protos.Example]:
- """
- Creates a list of `protos.Example` objects from a list of message options.
-
- This function takes a list of `discuss_types.MessageOptions` and pairs them into
- `protos.Example` objects. The input examples must be in pairs to create valid examples.
-
- Args:
- examples: The list of `discuss_types.MessageOptions`.
-
- Returns:
- A list of `protos.Example objects` created by pairing up the provided messages.
-
- Raises:
- ValueError: If the provided list of examples is not of even length.
- """
- if len(examples) % 2 != 0:
- raise ValueError(
- textwrap.dedent(
- f"""\
- Invalid input: You must pass either `Primer` objects, pairs of messages, or an even number of messages.
- Currently, {len(examples)} messages were provided, which is an odd number."""
- )
- )
- result = []
- pair = []
- for n, item in enumerate(examples):
- msg = _make_message(item)
- pair.append(msg)
- if n % 2 == 0:
- continue
- primer = protos.Example(
- input=pair[0],
- output=pair[1],
- )
- result.append(primer)
- pair = []
- return result
-
-
-def _make_examples(
- examples: discuss_types.ExamplesOptions,
-) -> List[protos.Example]:
- """
- Creates a list of `protos.Example` objects from the provided examples.
-
- This function takes various types of example content inputs and creates a list
- of `protos.Example` objects. It handles the conversion of different input types and ensures
- the appropriate structure for creating valid examples.
-
- Args:
- examples: The examples to convert.
-
- Returns:
- A list of `protos.Example` objects created from the provided examples.
- """
- if isinstance(examples, protos.Example):
- return [examples]
-
- if isinstance(examples, dict):
- return [_make_example(examples)]
-
- examples = list(examples)
-
- if not examples:
- return examples
-
- first = examples[0]
-
- if isinstance(first, dict):
- if "content" in first:
- # These are `Messages`
- return _make_examples_from_flat(examples)
- else:
- if not ("input" in first and "output" in first):
- raise TypeError(
- "Invalid dictionary format: To create an `Example` instance, the dictionary must contain both `input` and `output` keys."
- )
- else:
- if isinstance(first, discuss_types.MESSAGE_OPTIONS):
- return _make_examples_from_flat(examples)
-
- result = []
- for item in examples:
- result.append(_make_example(item))
- return result
-
-
-def _make_message_prompt_dict(
- prompt: discuss_types.MessagePromptOptions = None,
- *,
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
-) -> protos.MessagePrompt:
- """
- Creates a `protos.MessagePrompt` object from the provided prompt components.
-
- This function constructs a `protos.MessagePrompt` object using the provided `context`, `examples`,
- or `messages`. It ensures the proper structure and handling of the input components.
-
- Either pass a `prompt` or it's component `context`, `examples`, `messages`.
-
- Args:
- prompt: The complete prompt components.
- context: The context for the prompt.
- examples: The examples for the prompt.
- messages: The messages for the prompt.
-
- Returns:
- A `protos.MessagePrompt` object created from the provided prompt components.
- """
- if prompt is None:
- prompt = dict(
- context=context,
- examples=examples,
- messages=messages,
- )
- else:
- flat_prompt = (context is not None) or (examples is not None) or (messages is not None)
- if flat_prompt:
- raise ValueError(
- "Invalid configuration: Either `prompt` or its fields `(context, examples, messages)` should be set, but not both simultaneously."
- )
- if isinstance(prompt, protos.MessagePrompt):
- return prompt
- elif isinstance(prompt, dict): # Always check dict before Iterable.
- pass
- else:
- prompt = {"messages": prompt}
-
- keys = set(prompt.keys())
- if not keys.issubset(discuss_types.MESSAGE_PROMPT_KEYS):
- raise KeyError(
- f"Invalid prompt dictionary: Extra entries found that are not recognized: {keys - discuss_types.MESSAGE_PROMPT_KEYS}. Please check the keys."
- )
-
- examples = prompt.get("examples", None)
- if examples is not None:
- prompt["examples"] = _make_examples(examples)
- messages = prompt.get("messages", None)
- if messages is not None:
- prompt["messages"] = _make_messages(messages)
-
- prompt = {k: v for k, v in prompt.items() if v is not None}
- return prompt
-
-
-def _make_message_prompt(
- prompt: discuss_types.MessagePromptOptions = None,
- *,
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
-) -> protos.MessagePrompt:
- """Creates a `protos.MessagePrompt` object from the provided prompt components."""
- prompt = _make_message_prompt_dict(
- prompt=prompt, context=context, examples=examples, messages=messages
- )
- return protos.MessagePrompt(prompt)
-
-
-def _make_generate_message_request(
- *,
- model: model_types.AnyModelNameOptions | None,
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
- temperature: float | None = None,
- candidate_count: int | None = None,
- top_p: float | None = None,
- top_k: float | None = None,
- prompt: discuss_types.MessagePromptOptions | None = None,
-) -> protos.GenerateMessageRequest:
- """Creates a `protos.GenerateMessageRequest` object for generating messages."""
- model = model_types.make_model_name(model)
-
- prompt = _make_message_prompt(
- prompt=prompt, context=context, examples=examples, messages=messages
- )
-
- return protos.GenerateMessageRequest(
- model=model,
- prompt=prompt,
- temperature=temperature,
- top_p=top_p,
- top_k=top_k,
- candidate_count=candidate_count,
- )
-
-
-DEFAULT_DISCUSS_MODEL = "models/chat-bison-001"
-
-
-def chat(
- *,
- model: model_types.AnyModelNameOptions | None = "models/chat-bison-001",
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
- temperature: float | None = None,
- candidate_count: int | None = None,
- top_p: float | None = None,
- top_k: float | None = None,
- prompt: discuss_types.MessagePromptOptions | None = None,
- client: glm.DiscussServiceClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> discuss_types.ChatResponse:
- """Calls the API to initiate a chat with a model using provided parameters
-
- Args:
- model: Which model to call, as a string or a `types.Model`.
- context: Text that should be provided to the model first, to ground the response.
-
- If not empty, this `context` will be given to the model first before the
- `examples` and `messages`.
-
- This field can be a description of your prompt to the model to help provide
- context and guide the responses.
-
- Examples:
-
- * "Translate the phrase from English to French."
- * "Given a statement, classify the sentiment as happy, sad or neutral."
-
- Anything included in this field will take precedence over history in `messages`
- if the total input size exceeds the model's `Model.input_token_limit`.
- examples: Examples of what the model should generate.
-
- This includes both the user input and the response that the model should
- emulate.
-
- These `examples` are treated identically to conversation messages except
- that they take precedence over the history in `messages`:
- If the total input size exceeds the model's `input_token_limit` the input
- will be truncated. Items will be dropped from `messages` before `examples`
- messages: A snapshot of the conversation history sorted chronologically.
-
- Turns alternate between two authors.
-
- If the total input size exceeds the model's `input_token_limit` the input
- will be truncated: The oldest items will be dropped from `messages`.
- temperature: Controls the randomness of the output. Must be positive.
-
- Typical values are in the range: `[0.0,1.0]`. Higher values produce a
- more random and varied response. A temperature of zero will be deterministic.
- candidate_count: The **maximum** number of generated response messages to return.
-
- This value must be between `[1, 8]`, inclusive. If unset, this
- will default to `1`.
-
- Note: Only unique candidates are returned. Higher temperatures are more
- likely to produce unique candidates. Setting `temperature=0.0` will always
- return 1 candidate regardless of the `candidate_count`.
- top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
- top-k sampling.
-
- `top_k` sets the maximum number of tokens to sample from on each step.
- top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and
- top-k sampling.
-
- `top_p` configures the nucleus sampling. It sets the maximum cumulative
- probability of tokens to sample from.
-
- For example, if the sorted probabilities are
- `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
- as `[0.625, 0.25, 0.125, 0, 0, 0]`.
-
- Typical values are in the `[0.9, 1.0]` range.
- prompt: You may pass a `types.MessagePromptOptions` **instead** of a
- setting `context`/`examples`/`messages`, but not both.
- client: If you're not relying on the default client, you pass a
- `glm.DiscussServiceClient` instead.
- request_options: Options for the request.
-
- Returns:
- A `types.ChatResponse` containing the model's reply.
- """
- request = _make_generate_message_request(
- model=model,
- context=context,
- examples=examples,
- messages=messages,
- temperature=temperature,
- candidate_count=candidate_count,
- top_p=top_p,
- top_k=top_k,
- prompt=prompt,
- )
-
- return _generate_response(client=client, request=request, request_options=request_options)
-
-
-@string_utils.set_doc(chat.__doc__)
-async def chat_async(
- *,
- model: model_types.AnyModelNameOptions | None = "models/chat-bison-001",
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
- temperature: float | None = None,
- candidate_count: int | None = None,
- top_p: float | None = None,
- top_k: float | None = None,
- prompt: discuss_types.MessagePromptOptions | None = None,
- client: glm.DiscussServiceAsyncClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> discuss_types.ChatResponse:
- """Calls the API asynchronously to initiate a chat with a model using provided parameters"""
- request = _make_generate_message_request(
- model=model,
- context=context,
- examples=examples,
- messages=messages,
- temperature=temperature,
- candidate_count=candidate_count,
- top_p=top_p,
- top_k=top_k,
- prompt=prompt,
- )
-
- return await _generate_response_async(
- client=client, request=request, request_options=request_options
- )
-
-
-if (sys.version_info.major, sys.version_info.minor) >= (3, 10):
- DATACLASS_KWARGS = {"kw_only": True}
-else:
- DATACLASS_KWARGS = {}
-
-
-@string_utils.prettyprint
-@string_utils.set_doc(discuss_types.ChatResponse.__doc__)
-@dataclasses.dataclass(**DATACLASS_KWARGS, init=False)
-class ChatResponse(discuss_types.ChatResponse):
- _client: glm.DiscussServiceClient | None = dataclasses.field(default=lambda: None, repr=False)
-
- def __init__(self, **kwargs):
- for key, value in kwargs.items():
- setattr(self, key, value)
-
- @property
- @string_utils.set_doc(discuss_types.ChatResponse.last.__doc__)
- def last(self) -> str | None:
- if self.messages[-1]:
- return self.messages[-1]["content"]
- else:
- return None
-
- @last.setter
- def last(self, message: discuss_types.MessageOptions):
- message = _make_message(message)
- message = type(message).to_dict(message)
- self.messages[-1] = message
-
- @string_utils.set_doc(discuss_types.ChatResponse.reply.__doc__)
- def reply(
- self,
- message: discuss_types.MessageOptions,
- request_options: helper_types.RequestOptionsType | None = None,
- ) -> discuss_types.ChatResponse:
- if isinstance(self._client, glm.DiscussServiceAsyncClient):
- raise TypeError(
- "Invalid operation: The 'reply' method cannot be called on an asynchronous client. Please use the 'reply_async' method instead."
- )
- if self.last is None:
- raise ValueError(
- f"Invalid operation: No candidates returned from the model's last response. "
- f"Please inspect the '.filters' attribute to understand why responses were filtered out. Current filters: {self.filters}"
- )
-
- request = self.to_dict()
- request.pop("candidates")
- request.pop("filters", None)
- request["messages"] = list(request["messages"])
- request["messages"].append(_make_message(message))
- request = _make_generate_message_request(**request)
- return _generate_response(
- request=request, client=self._client, request_options=request_options
- )
-
- @string_utils.set_doc(discuss_types.ChatResponse.reply.__doc__)
- async def reply_async(
- self, message: discuss_types.MessageOptions
- ) -> discuss_types.ChatResponse:
- if isinstance(self._client, glm.DiscussServiceClient):
- raise TypeError(
- "Invalid method call: `reply_async` is not supported on a non-async client. Please use the `reply` method instead."
- )
- request = self.to_dict()
- request.pop("candidates")
- request.pop("filters", None)
- request["messages"] = list(request["messages"])
- request["messages"].append(_make_message(message))
- request = _make_generate_message_request(**request)
- return await _generate_response_async(request=request, client=self._client)
-
-
-def _build_chat_response(
- request: protos.GenerateMessageRequest,
- response: protos.GenerateMessageResponse,
- client: glm.DiscussServiceClient | protos.DiscussServiceAsyncClient,
-) -> ChatResponse:
- request = type(request).to_dict(request)
- prompt = request.pop("prompt")
- request["examples"] = prompt["examples"]
- request["context"] = prompt["context"]
- request["messages"] = prompt["messages"]
-
- response = type(response).to_dict(response)
- response.pop("messages")
-
- response["filters"] = palm_safety_types.convert_filters_to_enums(response["filters"])
-
- if response["candidates"]:
- last = response["candidates"][0]
- else:
- last = None
- request["messages"].append(last)
- request.setdefault("temperature", None)
- request.setdefault("candidate_count", None)
-
- return ChatResponse(_client=client, **response, **request) # pytype: disable=missing-parameter
-
-
-def _generate_response(
- request: protos.GenerateMessageRequest,
- client: glm.DiscussServiceClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> ChatResponse:
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_discuss_client()
-
- response = client.generate_message(request, **request_options)
-
- return _build_chat_response(request, response, client)
-
-
-async def _generate_response_async(
- request: protos.GenerateMessageRequest,
- client: glm.DiscussServiceAsyncClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> ChatResponse:
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_discuss_async_client()
-
- response = await client.generate_message(request, **request_options)
-
- return _build_chat_response(request, response, client)
-
-
-def count_message_tokens(
- *,
- prompt: discuss_types.MessagePromptOptions = None,
- context: str | None = None,
- examples: discuss_types.ExamplesOptions | None = None,
- messages: discuss_types.MessagesOptions | None = None,
- model: model_types.AnyModelNameOptions = DEFAULT_DISCUSS_MODEL,
- client: glm.DiscussServiceAsyncClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> discuss_types.TokenCount:
- """Calls the API to calculate the number of tokens used in the prompt."""
-
- model = model_types.make_model_name(model)
- prompt = _make_message_prompt(prompt, context=context, examples=examples, messages=messages)
-
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_discuss_client()
-
- result = client.count_message_tokens(model=model, prompt=prompt, **request_options)
-
- return type(result).to_dict(result)
diff --git a/google/generativeai/embedding.py b/google/generativeai/embedding.py
index 616fa07bf..15645c792 100644
--- a/google/generativeai/embedding.py
+++ b/google/generativeai/embedding.py
@@ -24,8 +24,8 @@
from google.generativeai.client import get_default_generative_async_client
from google.generativeai.types import helper_types
-from google.generativeai.types import text_types
from google.generativeai.types import model_types
+from google.generativeai.types import text_types
from google.generativeai.types import content_types
DEFAULT_EMB_MODEL = "models/embedding-001"
diff --git a/google/generativeai/files.py b/google/generativeai/files.py
index c0d8e1e0a..b2581bdcd 100644
--- a/google/generativeai/files.py
+++ b/google/generativeai/files.py
@@ -21,6 +21,7 @@
import logging
from google.generativeai import protos
from itertools import islice
+from io import IOBase
from google.generativeai.types import file_types
@@ -32,7 +33,7 @@
def upload_file(
- path: str | pathlib.Path | os.PathLike,
+ path: str | pathlib.Path | os.PathLike | IOBase,
*,
mime_type: str | None = None,
name: str | None = None,
@@ -42,7 +43,7 @@ def upload_file(
"""Calls the API to upload a file using a supported file service.
Args:
- path: The path to the file to be uploaded.
+ path: The path to the file or a file-like object (e.g., BytesIO) to be uploaded.
mime_type: The MIME type of the file. If not provided, it will be
inferred from the file extension.
name: The name of the file in the destination (e.g., 'files/sample-image').
@@ -57,17 +58,30 @@ def upload_file(
"""
client = get_default_file_client()
- path = pathlib.Path(os.fspath(path))
+ if isinstance(path, IOBase):
+ if mime_type is None:
+ raise ValueError(
+ "Unknown mime type: When passing a file like object to `path` (instead of a\n"
+ " path-like object) you must set the `mime_type` argument"
+ )
+ else:
+ path = pathlib.Path(os.fspath(path))
- if mime_type is None:
- mime_type, _ = mimetypes.guess_type(path)
+ if display_name is None:
+ display_name = path.name
+
+ if mime_type is None:
+ mime_type, _ = mimetypes.guess_type(path)
+
+ if mime_type is None:
+ raise ValueError(
+ "Unknown mime type: Could not determine the mimetype for your file\n"
+ " please set the `mime_type` argument"
+ )
if name is not None and "/" not in name:
name = f"files/{name}"
- if display_name is None:
- display_name = path.name
-
response = client.create_file(
path=path, mime_type=mime_type, name=name, display_name=display_name, resumable=resumable
)
diff --git a/google/generativeai/generative_models.py b/google/generativeai/generative_models.py
index 50b15261a..8d331a9f6 100644
--- a/google/generativeai/generative_models.py
+++ b/google/generativeai/generative_models.py
@@ -36,14 +36,14 @@ class GenerativeModel:
>>> import google.generativeai as genai
>>> import PIL.Image
>>> genai.configure(api_key='YOUR_API_KEY')
- >>> model = genai.GenerativeModel('models/gemini-pro')
+ >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content('Tell me a story about a magic backpack')
>>> result.text
"In the quaint little town of Lakeside, there lived a young girl named Lily..."
Multimodal input:
- >>> model = genai.GenerativeModel('models/gemini-pro')
+ >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> result = model.generate_content([
... "Give me a recipe for these:", PIL.Image.open('scones.jpeg')])
>>> result.text
@@ -72,7 +72,7 @@ class GenerativeModel:
def __init__(
self,
- model_name: str = "gemini-pro",
+ model_name: str = "gemini-1.5-flash-002",
safety_settings: safety_types.SafetySettingOptions | None = None,
generation_config: generation_types.GenerationConfigType | None = None,
tools: content_types.FunctionLibraryType | None = None,
@@ -250,7 +250,7 @@ def generate_content(
This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn
conversations.
- >>> model = genai.GenerativeModel('models/gemini-pro')
+ >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> response = model.generate_content('Tell me a story about a magic backpack')
>>> response.text
@@ -481,7 +481,7 @@ def start_chat(
class ChatSession:
"""Contains an ongoing conversation with the model.
- >>> model = genai.GenerativeModel('models/gemini-pro')
+ >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
@@ -524,7 +524,7 @@ def send_message(
Appends the request and response to the conversation history.
- >>> model = genai.GenerativeModel('models/gemini-pro')
+ >>> model = genai.GenerativeModel('models/gemini-1.5-flash')
>>> chat = model.start_chat()
>>> response = chat.send_message("Hello")
>>> print(response.text)
diff --git a/google/generativeai/models.py b/google/generativeai/models.py
index 4b95a2470..b23a7ce88 100644
--- a/google/generativeai/models.py
+++ b/google/generativeai/models.py
@@ -40,7 +40,7 @@ def get_model(
```
import pprint
- model = genai.get_model('models/gemini-pro')
+ model = genai.get_model('models/gemini-1.5-flash')
pprint.pprint(model)
```
@@ -112,7 +112,7 @@ def get_tuned_model(
```
import pprint
- model = genai.get_tuned_model('tunedModels/gemini-1.0-pro-001')
+ model = genai.get_tuned_model('tunedModels/gemini-1.5-flash')
pprint.pprint(model)
```
diff --git a/google/generativeai/notebook/cmd_line_parser.py b/google/generativeai/notebook/cmd_line_parser.py
index 7005bf600..9b8e84048 100644
--- a/google/generativeai/notebook/cmd_line_parser.py
+++ b/google/generativeai/notebook/cmd_line_parser.py
@@ -373,15 +373,10 @@ def _create_parser(
epilog = ""
# Commands
- extra_args = {}
- if sys.version_info[0:2] >= (3, 9):
- extra_args["exit_on_error"] = False
-
parser = argument_parser.ArgumentParser(
prog=system_name,
description=description,
epilog=epilog,
- **extra_args,
)
subparsers = parser.add_subparsers(dest="cmd")
_create_run_parser(
diff --git a/google/generativeai/notebook/command_utils.py b/google/generativeai/notebook/command_utils.py
index 355592c21..f4432c0c2 100644
--- a/google/generativeai/notebook/command_utils.py
+++ b/google/generativeai/notebook/command_utils.py
@@ -106,7 +106,7 @@ def create_llm_function(
def _convert_simple_compare_fn(
- name_and_simple_fn: tuple[str, Callable[[str, str], Any]]
+ name_and_simple_fn: tuple[str, Callable[[str, str], Any]],
) -> tuple[str, llm_function.CompareFn]:
simple_fn = name_and_simple_fn[1]
new_fn = lambda x, y: simple_fn(x.result_value(), y.result_value())
diff --git a/google/generativeai/notebook/lib/llm_function.py b/google/generativeai/notebook/lib/llm_function.py
index c3eb7b52d..c4f379828 100644
--- a/google/generativeai/notebook/lib/llm_function.py
+++ b/google/generativeai/notebook/lib/llm_function.py
@@ -64,7 +64,7 @@ def _convert_compare_fn_to_batch_add_fn(
llmfn_output_row.LLMFnOutputRowView,
],
Any,
- ]
+ ],
) -> llmfn_post_process.LLMCompareFnPostProcessBatchAddFn:
"""Vectorize a single-row-based comparison function."""
@@ -74,7 +74,7 @@ def _fn(
llmfn_output_row.LLMFnOutputRowView,
llmfn_output_row.LLMFnOutputRowView,
]
- ]
+ ],
) -> Sequence[Any]:
return [fn(lhs, rhs) for lhs, rhs in lhs_and_rhs_rows]
diff --git a/google/generativeai/notebook/text_model.py b/google/generativeai/notebook/text_model.py
index 38375e348..7360bbfbd 100644
--- a/google/generativeai/notebook/text_model.py
+++ b/google/generativeai/notebook/text_model.py
@@ -20,7 +20,7 @@
from google.generativeai.types import generation_types
from google.generativeai.notebook.lib import model as model_lib
-_DEFAULT_MODEL = "models/gemini-pro"
+_DEFAULT_MODEL = "models/gemini-1.5-flash"
class TextModel(model_lib.AbstractModel):
diff --git a/google/generativeai/py.typed b/google/generativeai/py.typed
new file mode 100644
index 000000000..d57989efb
--- /dev/null
+++ b/google/generativeai/py.typed
@@ -0,0 +1 @@
+# see: https://peps.python.org/pep-0561/
diff --git a/google/generativeai/responder.py b/google/generativeai/responder.py
index bb85167ad..70a58cf5b 100644
--- a/google/generativeai/responder.py
+++ b/google/generativeai/responder.py
@@ -23,6 +23,7 @@
import pydantic
from google.generativeai import protos
+from google.generativeai.types import content_types
Type = protos.Type
@@ -89,52 +90,36 @@ def _generate_schema(
"""
if descriptions is None:
descriptions = {}
- if required is None:
- required = []
defaults = dict(inspect.signature(f).parameters)
- fields_dict = {
- name: (
- # 1. We infer the argument type here: use Any rather than None so
- # it will not try to auto-infer the type based on the default value.
- (param.annotation if param.annotation != inspect.Parameter.empty else Any),
- pydantic.Field(
- # 2. We do not support default values for now.
- # default=(
- # param.default if param.default != inspect.Parameter.empty
- # else None
- # ),
- # 3. We support user-provided descriptions.
- description=descriptions.get(name, None),
- ),
- )
- for name, param in defaults.items()
- # We do not support *args or **kwargs
- if param.kind
- in (
+
+ fields_dict = {}
+ for name, param in defaults.items():
+ if param.kind in (
inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY,
inspect.Parameter.POSITIONAL_ONLY,
- )
- }
- parameters = pydantic.create_model(f.__name__, **fields_dict).schema()
- # Postprocessing
- # 4. Suppress unnecessary title generation:
- # * https://github.com/pydantic/pydantic/issues/1051
- # * http://cl/586221780
- parameters.pop("title", None)
- for name, function_arg in parameters.get("properties", {}).items():
- function_arg.pop("title", None)
- annotation = defaults[name].annotation
- # 5. Nullable fields:
- # * https://github.com/pydantic/pydantic/issues/1270
- # * https://stackoverflow.com/a/58841311
- # * https://github.com/pydantic/pydantic/discussions/4872
- if typing.get_origin(annotation) is typing.Union and type(None) in typing.get_args(
- annotation
):
- function_arg["nullable"] = True
+ # We do not support default values for now.
+ # default=(
+ # param.default if param.default != inspect.Parameter.empty
+ # else None
+ # ),
+ field = pydantic.Field(
+ # We support user-provided descriptions.
+ description=descriptions.get(name, None)
+ )
+
+ # 1. We infer the argument type here: use Any rather than None so
+ # it will not try to auto-infer the type based on the default value.
+ if param.annotation != inspect.Parameter.empty:
+ fields_dict[name] = param.annotation, field
+ else:
+ fields_dict[name] = Any, field
+
+ parameters = _build_schema(f.__name__, fields_dict)
+
# 6. Annotate required fields.
- if required:
+ if required is not None:
# We use the user-provided "required" fields if specified.
parameters["required"] = required
else:
@@ -152,10 +137,138 @@ def _generate_schema(
)
)
]
- schema = dict(name=f.__name__, description=f.__doc__, parameters=parameters)
+ schema = dict(name=f.__name__, description=f.__doc__)
+ if parameters["properties"]:
+ schema["parameters"] = parameters
+
return schema
+def _build_schema(fname, fields_dict):
+ parameters = pydantic.create_model(fname, **fields_dict).model_json_schema()
+ defs = parameters.pop("$defs", {})
+ # flatten the defs
+ for name, value in defs.items():
+ unpack_defs(value, defs)
+ unpack_defs(parameters, defs)
+
+ # 5. Nullable fields:
+ # * https://github.com/pydantic/pydantic/issues/1270
+ # * https://stackoverflow.com/a/58841311
+ # * https://github.com/pydantic/pydantic/discussions/4872
+ convert_to_nullable(parameters)
+ add_object_type(parameters)
+ # Postprocessing
+ # 4. Suppress unnecessary title generation:
+ # * https://github.com/pydantic/pydantic/issues/1051
+ # * http://cl/586221780
+ strip_titles(parameters)
+ strip_additional_properties(parameters)
+ return parameters
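+
+
+# Illustrative sketch (not part of the module): for a function like
+#     def multiply(a: float, b: list[int] | None = None): ...
+# `_build_schema` returns roughly:
+#     {'type': 'object',
+#      'properties': {'a': {'type': 'number'},
+#                     'b': {'type': 'array', 'items': {'type': 'integer'},
+#                           'nullable': True}}}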
+
+
+def unpack_defs(schema, defs):
+ properties = schema.get("properties", None)
+ if properties is None:
+ return
+
+ for name, value in properties.items():
+ ref_key = value.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ properties[name] = ref
+ continue
+
+ anyof = value.get("anyOf", None)
+ if anyof is not None:
+ for i, atype in enumerate(anyof):
+ ref_key = atype.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ anyof[i] = ref
+ continue
+
+ items = value.get("items", None)
+ if items is not None:
+ ref_key = items.get("$ref", None)
+ if ref_key is not None:
+ ref = defs[ref_key.split("defs/")[-1]]
+ unpack_defs(ref, defs)
+ value["items"] = ref
+ continue
+
+
+def strip_titles(schema):
+ title = schema.pop("title", None)
+
+ properties = schema.get("properties", None)
+ if properties is not None:
+ for name, value in properties.items():
+ strip_titles(value)
+
+ items = schema.get("items", None)
+ if items is not None:
+ strip_titles(items)
+
+
+def strip_additional_properties(schema):
+ schema.pop("additionalProperties", None)
+
+ properties = schema.get("properties", None)
+ if properties is not None:
+ for name, value in properties.items():
+ strip_additional_properties(value)
+
+ items = schema.get("items", None)
+ if items is not None:
+ strip_additional_properties(items)
+
+
+def add_object_type(schema):
+ properties = schema.get("properties", None)
+ if properties is not None:
+ schema.pop("required", None)
+ schema["type"] = "object"
+ for name, value in properties.items():
+ add_object_type(value)
+
+ items = schema.get("items", None)
+ if items is not None:
+ add_object_type(items)
+
+
+def convert_to_nullable(schema):
+ anyof = schema.pop("anyOf", None)
+ if anyof is not None:
+ if len(anyof) != 2:
+ raise ValueError(
+ "Invalid input: Type Unions are not supported, except for `Optional` types. "
+ "Please provide an `Optional` type or a non-Union type."
+ )
+ a, b = anyof
+ if a == {"type": "null"}:
+ schema.update(b)
+ elif b == {"type": "null"}:
+ schema.update(a)
+ else:
+ raise ValueError(
+ "Invalid input: Type Unions are not supported, except for `Optional` types. "
+ "Please provide an `Optional` type or a non-Union type."
+ )
+ schema["nullable"] = True
+
+ properties = schema.get("properties", None)
+ if properties is not None:
+ for name, value in properties.items():
+ convert_to_nullable(value)
+
+ items = schema.get("items", None)
+ if items is not None:
+ convert_to_nullable(items)
+
+
def _rename_schema_fields(schema: dict[str, Any]):
if schema is None:
return schema
diff --git a/google/generativeai/text.py b/google/generativeai/text.py
deleted file mode 100644
index 2a6267661..000000000
--- a/google/generativeai/text.py
+++ /dev/null
@@ -1,347 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from __future__ import annotations
-
-import dataclasses
-from collections.abc import Iterable, Sequence
-import itertools
-from typing import Any, Iterable, overload, TypeVar
-
-import google.ai.generativelanguage as glm
-
-from google.generativeai import protos
-
-from google.generativeai.client import get_default_text_client
-from google.generativeai import string_utils
-from google.generativeai.types import helper_types
-from google.generativeai.types import text_types
-from google.generativeai.types import model_types
-from google.generativeai import models
-from google.generativeai.types import palm_safety_types
-
-DEFAULT_TEXT_MODEL = "models/text-bison-001"
-EMBEDDING_MAX_BATCH_SIZE = 100
-
-try:
- # python 3.12+
- _batched = itertools.batched # type: ignore
-except AttributeError:
- T = TypeVar("T")
-
- def _batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]:
- if n < 1:
- raise ValueError(f"Batch size `n` must be >1, got: {n}")
- batch = []
- for item in iterable:
- batch.append(item)
- if len(batch) == n:
- yield batch
- batch = []
-
- if batch:
- yield batch
-
-
-def _make_text_prompt(prompt: str | dict[str, str]) -> protos.TextPrompt:
- """
- Creates a `protos.TextPrompt` object based on the provided prompt input.
-
- Args:
- prompt: The prompt input, either a string or a dictionary.
-
- Returns:
- protos.TextPrompt: A TextPrompt object containing the prompt text.
-
- Raises:
- TypeError: If the provided prompt is neither a string nor a dictionary.
- """
- if isinstance(prompt, str):
- return protos.TextPrompt(text=prompt)
- elif isinstance(prompt, dict):
- return protos.TextPrompt(prompt)
- else:
- raise TypeError(
- "Invalid argument type: Expected a string or dictionary for the text prompt."
- )
-
-
-def _make_generate_text_request(
- *,
- model: model_types.AnyModelNameOptions = DEFAULT_TEXT_MODEL,
- prompt: str | None = None,
- temperature: float | None = None,
- candidate_count: int | None = None,
- max_output_tokens: int | None = None,
- top_p: int | None = None,
- top_k: int | None = None,
- safety_settings: palm_safety_types.SafetySettingOptions | None = None,
- stop_sequences: str | Iterable[str] | None = None,
-) -> protos.GenerateTextRequest:
- """
- Creates a `protos.GenerateTextRequest` object based on the provided parameters.
-
- This function generates a `protos.GenerateTextRequest` object with the specified
- parameters. It prepares the input parameters and creates a request that can be
- used for generating text using the chosen model.
-
- Args:
- model: The model to use for text generation.
- prompt: The prompt for text generation. Defaults to None.
- temperature: The temperature for randomness in generation. Defaults to None.
- candidate_count: The number of candidates to consider. Defaults to None.
- max_output_tokens: The maximum number of output tokens. Defaults to None.
- top_p: The nucleus sampling probability threshold. Defaults to None.
- top_k: The top-k sampling parameter. Defaults to None.
- safety_settings: Safety settings for generated text. Defaults to None.
- stop_sequences: Stop sequences to halt text generation. Can be a string
- or iterable of strings. Defaults to None.
-
- Returns:
- `protos.GenerateTextRequest`: A `GenerateTextRequest` object configured with the specified parameters.
- """
- model = model_types.make_model_name(model)
- prompt = _make_text_prompt(prompt=prompt)
- safety_settings = palm_safety_types.normalize_safety_settings(safety_settings)
- if isinstance(stop_sequences, str):
- stop_sequences = [stop_sequences]
- if stop_sequences:
- stop_sequences = list(stop_sequences)
-
- return protos.GenerateTextRequest(
- model=model,
- prompt=prompt,
- temperature=temperature,
- candidate_count=candidate_count,
- max_output_tokens=max_output_tokens,
- top_p=top_p,
- top_k=top_k,
- safety_settings=safety_settings,
- stop_sequences=stop_sequences,
- )
-
-
-def generate_text(
- *,
- model: model_types.AnyModelNameOptions = DEFAULT_TEXT_MODEL,
- prompt: str,
- temperature: float | None = None,
- candidate_count: int | None = None,
- max_output_tokens: int | None = None,
- top_p: float | None = None,
- top_k: float | None = None,
- safety_settings: palm_safety_types.SafetySettingOptions | None = None,
- stop_sequences: str | Iterable[str] | None = None,
- client: glm.TextServiceClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> text_types.Completion:
- """Calls the API to generate text based on the provided prompt.
-
- Args:
- model: Which model to call, as a string or a `types.Model`.
- prompt: Free-form input text given to the model. Given a prompt, the model will
- generate text that completes the input text.
- temperature: Controls the randomness of the output. Must be positive.
- Typical values are in the range: `[0.0,1.0]`. Higher values produce a
- more random and varied response. A temperature of zero will be deterministic.
- candidate_count: The **maximum** number of generated response messages to return.
- This value must be between `[1, 8]`, inclusive. If unset, this
- will default to `1`.
-
- Note: Only unique candidates are returned. Higher temperatures are more
- likely to produce unique candidates. Setting `temperature=0.0` will always
- return 1 candidate regardless of the `candidate_count`.
- max_output_tokens: Maximum number of tokens to include in a candidate. Must be greater
- than zero. If unset, will default to 64.
- top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
- `top_k` sets the maximum number of tokens to sample from on each step.
- top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
- `top_p` configures the nucleus sampling. It sets the maximum cumulative
- probability of tokens to sample from.
- For example, if the sorted probabilities are
- `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
- as `[0.625, 0.25, 0.125, 0, 0, 0]`.
- safety_settings: A list of unique `types.SafetySetting` instances for blocking unsafe content.
- These will be enforced on the `prompt` and
- `candidates`. There should not be more than one
- setting for each `types.SafetyCategory` type. The API will block any prompts and
- responses that fail to meet the thresholds set by these settings. This list
- overrides the default settings for each `SafetyCategory` specified in the
- safety_settings. If there is no `types.SafetySetting` for a given
- `SafetyCategory` provided in the list, the API will use the default safety
- setting for that category.
- stop_sequences: A set of up to 5 character sequences that will stop output generation.
- If specified, the API will stop at the first appearance of a stop
- sequence. The stop sequence will not be included as part of the response.
- client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
- request_options: Options for the request.
-
- Returns:
- A `types.Completion` containing the model's text completion response.
- """
- request = _make_generate_text_request(
- model=model,
- prompt=prompt,
- temperature=temperature,
- candidate_count=candidate_count,
- max_output_tokens=max_output_tokens,
- top_p=top_p,
- top_k=top_k,
- safety_settings=safety_settings,
- stop_sequences=stop_sequences,
- )
-
- return _generate_response(client=client, request=request, request_options=request_options)
-
-
-@string_utils.prettyprint
-@dataclasses.dataclass(init=False)
-class Completion(text_types.Completion):
- def __init__(self, **kwargs):
- for key, value in kwargs.items():
- setattr(self, key, value)
-
- self.result = None
- if self.candidates:
- self.result = self.candidates[0]["output"]
-
-
-def _generate_response(
- request: protos.GenerateTextRequest,
- client: glm.TextServiceClient = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> Completion:
- """
- Generates a response using the provided `protos.GenerateTextRequest` and client.
-
- Args:
- request: The text generation request.
- client: The client to use for text generation. Defaults to None, in which
- case the default text client is used.
- request_options: Options for the request.
-
- Returns:
- `Completion`: A `Completion` object with the generated text and response information.
- """
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_text_client()
-
- response = client.generate_text(request, **request_options)
- response = type(response).to_dict(response)
-
- response["filters"] = palm_safety_types.convert_filters_to_enums(response["filters"])
- response["safety_feedback"] = palm_safety_types.convert_safety_feedback_to_enums(
- response["safety_feedback"]
- )
- response["candidates"] = palm_safety_types.convert_candidate_enums(response["candidates"])
-
- return Completion(_client=client, **response)
-
-
-def count_text_tokens(
- model: model_types.AnyModelNameOptions,
- prompt: str,
- client: glm.TextServiceClient | None = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> text_types.TokenCount:
- """Calls the API to count the number of tokens in the text prompt."""
-
- base_model = models.get_base_model_name(model)
-
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_text_client()
-
- result = client.count_text_tokens(
- protos.CountTextTokensRequest(model=base_model, prompt={"text": prompt}),
- **request_options,
- )
-
- return type(result).to_dict(result)
-
-
-@overload
-def generate_embeddings(
- model: model_types.BaseModelNameOptions,
- text: str,
- client: glm.TextServiceClient = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> text_types.EmbeddingDict: ...
-
-
-@overload
-def generate_embeddings(
- model: model_types.BaseModelNameOptions,
- text: Sequence[str],
- client: glm.TextServiceClient = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> text_types.BatchEmbeddingDict: ...
-
-
-def generate_embeddings(
- model: model_types.BaseModelNameOptions,
- text: str | Sequence[str],
- client: glm.TextServiceClient = None,
- request_options: helper_types.RequestOptionsType | None = None,
-) -> text_types.EmbeddingDict | text_types.BatchEmbeddingDict:
- """Calls the API to create an embedding for the text passed in.
-
- Args:
- model: Which model to call, as a string or a `types.Model`.
-
- text: Free-form input text given to the model. Given a string, the model will
- generate an embedding based on the input text.
-
- client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.
-
- request_options: Options for the request.
-
- Returns:
- Dictionary containing the embedding (list of float values) for the input text.
- """
- model = model_types.make_model_name(model)
-
- if request_options is None:
- request_options = {}
-
- if client is None:
- client = get_default_text_client()
-
- if isinstance(text, str):
- embedding_request = protos.EmbedTextRequest(model=model, text=text)
- embedding_response = client.embed_text(
- embedding_request,
- **request_options,
- )
- embedding_dict = type(embedding_response).to_dict(embedding_response)
- embedding_dict["embedding"] = embedding_dict["embedding"]["value"]
- else:
- result = {"embedding": []}
- for batch in _batched(text, EMBEDDING_MAX_BATCH_SIZE):
- # TODO(markdaoust): This could use an option for returning an iterator or wait-bar.
- embedding_request = protos.BatchEmbedTextRequest(model=model, texts=batch)
- embedding_response = client.batch_embed_text(
- embedding_request,
- **request_options,
- )
- embedding_dict = type(embedding_response).to_dict(embedding_response)
- result["embedding"].extend(e["value"] for e in embedding_dict["embeddings"])
- return result
-
- return embedding_dict
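The PaLM-era text service deleted above (`generate_text`, `count_text_tokens`, `generate_embeddings`) has no direct replacement in this file; the equivalent calls now go through the Gemini surface. A minimal sketch, assuming `gemini-1.5-flash` as the target model:

```python
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")

# generate_text(...)  ->  GenerativeModel.generate_content(...)
response = model.generate_content("Write a haiku about the sea.")
print(response.text)

# count_text_tokens(...)  ->  GenerativeModel.count_tokens(...)
print(model.count_tokens("Write a haiku about the sea."))
```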
diff --git a/google/generativeai/types/__init__.py b/google/generativeai/types/__init__.py
index 0acfb1397..1e7853746 100644
--- a/google/generativeai/types/__init__.py
+++ b/google/generativeai/types/__init__.py
@@ -16,18 +16,14 @@
from google.generativeai.types.citation_types import *
from google.generativeai.types.content_types import *
-from google.generativeai.types.discuss_types import *
from google.generativeai.types.file_types import *
from google.generativeai.types.generation_types import *
from google.generativeai.types.helper_types import *
from google.generativeai.types.model_types import *
from google.generativeai.types.permission_types import *
from google.generativeai.types.safety_types import *
-from google.generativeai.types.text_types import *
-del discuss_types
del model_types
-del text_types
del citation_types
del safety_types
diff --git a/google/generativeai/types/content_types.py b/google/generativeai/types/content_types.py
index e2e2b680d..80f60d2b2 100644
--- a/google/generativeai/types/content_types.py
+++ b/google/generativeai/types/content_types.py
@@ -19,6 +19,7 @@
import io
import inspect
import mimetypes
+import pathlib
import typing
from typing import Any, Callable, Union
from typing_extensions import TypedDict
@@ -30,7 +31,7 @@
if typing.TYPE_CHECKING:
import PIL.Image
- import PIL.PngImagePlugin
+ import PIL.ImageFile
import IPython.display
IMAGE_TYPES = (PIL.Image.Image, IPython.display.Image)
@@ -38,7 +39,7 @@
IMAGE_TYPES = ()
try:
import PIL.Image
- import PIL.PngImagePlugin
+ import PIL.ImageFile
IMAGE_TYPES = IMAGE_TYPES + (PIL.Image.Image,)
except ImportError:
@@ -71,24 +72,61 @@
"FunctionLibraryType",
]
+Mode = protos.DynamicRetrievalConfig.Mode
-def pil_to_blob(img):
- bytesio = io.BytesIO()
- if isinstance(img, PIL.PngImagePlugin.PngImageFile) or img.mode == "RGBA":
- img.save(bytesio, format="PNG")
- mime_type = "image/png"
- else:
- img.save(bytesio, format="JPEG")
- mime_type = "image/jpeg"
- bytesio.seek(0)
- data = bytesio.read()
- return protos.Blob(mime_type=mime_type, data=data)
+ModeOptions = Union[int, str, Mode]
+
+_MODE: dict[ModeOptions, Mode] = {
+ Mode.MODE_UNSPECIFIED: Mode.MODE_UNSPECIFIED,
+ 0: Mode.MODE_UNSPECIFIED,
+ "mode_unspecified": Mode.MODE_UNSPECIFIED,
+ "unspecified": Mode.MODE_UNSPECIFIED,
+ Mode.MODE_DYNAMIC: Mode.MODE_DYNAMIC,
+ 1: Mode.MODE_DYNAMIC,
+ "mode_dynamic": Mode.MODE_DYNAMIC,
+ "dynamic": Mode.MODE_DYNAMIC,
+}
+
+
+def to_mode(x: ModeOptions) -> Mode:
+ if isinstance(x, str):
+ x = x.lower()
+ return _MODE[x]
+
+
+def _pil_to_blob(image: PIL.Image.Image) -> protos.Blob:
+ # If the image is a local file, return a file-based blob without any modification.
+ # Otherwise, return a lossless WebP blob (same quality with optimized size).
+ def file_blob(image: PIL.Image.Image) -> protos.Blob | None:
+ if not isinstance(image, PIL.ImageFile.ImageFile) or image.filename is None:
+ return None
+ filename = str(image.filename)
+ if not pathlib.Path(filename).is_file():
+ return None
+
+ mime_type = image.get_format_mimetype()
+ image_bytes = pathlib.Path(filename).read_bytes()
+
+ return protos.Blob(mime_type=mime_type, data=image_bytes)
+
+ def webp_blob(image: PIL.Image.Image) -> protos.Blob:
+ # Reference: https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#webp
+ image_io = io.BytesIO()
+ image.save(image_io, format="webp", lossless=True)
+ image_io.seek(0)
+
+ mime_type = "image/webp"
+ image_bytes = image_io.read()
+
+ return protos.Blob(mime_type=mime_type, data=image_bytes)
+
+ return file_blob(image) or webp_blob(image)
def image_to_blob(image) -> protos.Blob:
if PIL is not None:
if isinstance(image, PIL.Image.Image):
- return pil_to_blob(image)
+ return _pil_to_blob(image)
if IPython is not None:
if isinstance(image, IPython.display.Image):
@@ -379,7 +417,7 @@ def _schema_for_function(
def _build_schema(fname, fields_dict):
- parameters = pydantic.create_model(fname, **fields_dict).schema()
+ parameters = pydantic.create_model(fname, **fields_dict).model_json_schema()
defs = parameters.pop("$defs", {})
# flatten the defs
for name, value in defs.items():
@@ -397,11 +435,15 @@ def _build_schema(fname, fields_dict):
# * https://github.com/pydantic/pydantic/issues/1051
# * http://cl/586221780
strip_titles(parameters)
+ strip_additional_properties(parameters)
return parameters
def unpack_defs(schema, defs):
- properties = schema["properties"]
+ properties = schema.get("properties", None)
+ if properties is None:
+ return
+
for name, value in properties.items():
ref_key = value.get("$ref", None)
if ref_key is not None:
@@ -443,6 +485,19 @@ def strip_titles(schema):
strip_titles(items)
+def strip_additional_properties(schema):
+ schema.pop("additionalProperties", None)
+
+ properties = schema.get("properties", None)
+ if properties is not None:
+ for name, value in properties.items():
+ strip_additional_properties(value)
+
+ items = schema.get("items", None)
+ if items is not None:
+ strip_additional_properties(items)
+
+
def add_object_type(schema):
properties = schema.get("properties", None)
if properties is not None:
@@ -624,16 +679,54 @@ def _encode_fd(fd: FunctionDeclaration | protos.FunctionDeclaration) -> protos.F
return fd.to_proto()
+class DynamicRetrievalConfigDict(TypedDict):
+    mode: protos.DynamicRetrievalConfig.Mode
+ dynamic_threshold: float
+
+
+DynamicRetrievalConfig = Union[protos.DynamicRetrievalConfig, DynamicRetrievalConfigDict]
+
+
+class GoogleSearchRetrievalDict(TypedDict):
+ dynamic_retrieval_config: DynamicRetrievalConfig
+
+
+GoogleSearchRetrievalType = Union[protos.GoogleSearchRetrieval, GoogleSearchRetrievalDict]
+
+
+def _make_google_search_retrieval(gsr: GoogleSearchRetrievalType):
+ if isinstance(gsr, protos.GoogleSearchRetrieval):
+ return gsr
+ elif isinstance(gsr, Mapping):
+ drc = gsr.get("dynamic_retrieval_config", None)
+ if drc is not None and isinstance(drc, Mapping):
+ mode = drc.get("mode", None)
+ if mode is not None:
+ mode = to_mode(mode)
+ gsr = gsr.copy()
+ gsr["dynamic_retrieval_config"]["mode"] = mode
+ return protos.GoogleSearchRetrieval(gsr)
+ else:
+ raise TypeError(
+            "Invalid input type. Expected an instance of `protos.GoogleSearchRetrieval`.\n"
+ f"However, received an object of type: {type(gsr)}.\n"
+ f"Object Value: {gsr}"
+ )
+
+
class Tool:
- """A wrapper for `protos.Tool`, Contains a collection of related `FunctionDeclaration` objects."""
+    """A wrapper for `protos.Tool`. Contains a collection of related `FunctionDeclaration` objects,
+    a `protos.CodeExecution` object, and a `protos.GoogleSearchRetrieval` object."""
def __init__(
self,
+ *,
function_declarations: Iterable[FunctionDeclarationType] | None = None,
+ google_search_retrieval: GoogleSearchRetrievalType | None = None,
code_execution: protos.CodeExecution | None = None,
):
        # The main path doesn't use this but it seems useful.
- if function_declarations:
+ if function_declarations is not None:
self._function_declarations = [
_make_function_declaration(f) for f in function_declarations
]
@@ -648,8 +741,14 @@ def __init__(
self._function_declarations = []
self._index = {}
+ if google_search_retrieval is not None:
+ self._google_search_retrieval = _make_google_search_retrieval(google_search_retrieval)
+ else:
+ self._google_search_retrieval = None
+
self._proto = protos.Tool(
function_declarations=[_encode_fd(fd) for fd in self._function_declarations],
+            google_search_retrieval=self._google_search_retrieval,
code_execution=code_execution,
)
@@ -657,6 +756,10 @@ def __init__(
def function_declarations(self) -> list[FunctionDeclaration | protos.FunctionDeclaration]:
return self._function_declarations
+ @property
+ def google_search_retrieval(self) -> protos.GoogleSearchRetrieval:
+ return self._google_search_retrieval
+
@property
def code_execution(self) -> protos.CodeExecution:
return self._proto.code_execution
@@ -685,7 +788,7 @@ class ToolDict(TypedDict):
ToolType = Union[
- Tool, protos.Tool, ToolDict, Iterable[FunctionDeclarationType], FunctionDeclarationType
+ str, Tool, protos.Tool, ToolDict, Iterable[FunctionDeclarationType], FunctionDeclarationType
]
@@ -697,9 +800,23 @@ def _make_tool(tool: ToolType) -> Tool:
code_execution = tool.code_execution
else:
code_execution = None
- return Tool(function_declarations=tool.function_declarations, code_execution=code_execution)
+
+ if "google_search_retrieval" in tool:
+ google_search_retrieval = tool.google_search_retrieval
+ else:
+ google_search_retrieval = None
+
+ return Tool(
+ function_declarations=tool.function_declarations,
+ google_search_retrieval=google_search_retrieval,
+ code_execution=code_execution,
+ )
elif isinstance(tool, dict):
- if "function_declarations" in tool or "code_execution" in tool:
+ if (
+ "function_declarations" in tool
+ or "google_search_retrieval" in tool
+ or "code_execution" in tool
+ ):
return Tool(**tool)
else:
fd = tool
@@ -707,10 +824,17 @@ def _make_tool(tool: ToolType) -> Tool:
elif isinstance(tool, str):
if tool.lower() == "code_execution":
return Tool(code_execution=protos.CodeExecution())
+        # Check whether the string names the google_search_retrieval tool.
+ elif tool.lower() == "google_search_retrieval":
+ return Tool(google_search_retrieval=protos.GoogleSearchRetrieval())
else:
- raise ValueError("The only string that can be passed as a tool is 'code_execution'.")
+            raise ValueError(
+                "The only strings that can be passed as a tool are 'code_execution' and 'google_search_retrieval'."
+            )
elif isinstance(tool, protos.CodeExecution):
return Tool(code_execution=tool)
+ elif isinstance(tool, protos.GoogleSearchRetrieval):
+ return Tool(google_search_retrieval=tool)
elif isinstance(tool, Iterable):
return Tool(function_declarations=tool)
else:
@@ -766,7 +890,7 @@ def to_proto(self):
def _make_tools(tools: ToolsType) -> list[Tool]:
if isinstance(tools, str):
- if tools.lower() == "code_execution":
+ if tools.lower() == "code_execution" or tools.lower() == "google_search_retrieval":
return [_make_tool(tools)]
else:
-            raise ValueError("The only string that can be passed as a tool is 'code_execution'.")
+            raise ValueError(
+                "The only strings that can be passed as a tool are 'code_execution' and 'google_search_retrieval'."
+            )
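Taken together, the changes above wire a new `google_search_retrieval` tool through `Tool`, `_make_tool`, and `_make_tools`. A sketch of the two entry points, assuming the dict form round-trips through `DynamicRetrievalConfigDict` (the threshold value is illustrative):

```python
import google.generativeai as genai

# Shortest form: the bare string is routed through _make_tools/_make_tool.
model = genai.GenerativeModel("gemini-1.5-flash", tools="google_search_retrieval")

# Dict form: `to_mode` accepts the enum, its integer value, or a
# case-insensitive string such as "dynamic" / "mode_dynamic".
model = genai.GenerativeModel(
    "gemini-1.5-flash",
    tools={
        "google_search_retrieval": {
            "dynamic_retrieval_config": {"mode": "dynamic", "dynamic_threshold": 0.5}
        }
    },
)
```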
diff --git a/google/generativeai/types/discuss_types.py b/google/generativeai/types/discuss_types.py
deleted file mode 100644
index a538da65c..000000000
--- a/google/generativeai/types/discuss_types.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Type definitions for the discuss service."""
-
-import abc
-import dataclasses
-from typing import Any, Dict, Union, Iterable, Optional, Tuple, List
-from typing_extensions import TypedDict
-
-from google.generativeai import protos
-from google.generativeai import string_utils
-
-from google.generativeai.types import palm_safety_types
-from google.generativeai.types import citation_types
-
-
-__all__ = [
- "MessageDict",
- "MessageOptions",
- "MessagesOptions",
- "ExampleDict",
- "ExampleOptions",
- "ExamplesOptions",
- "MessagePromptDict",
- "MessagePromptOptions",
- "ResponseDict",
- "ChatResponse",
- "AuthorError",
-]
-
-
-class TokenCount(TypedDict):
- token_count: int
-
-
-class MessageDict(TypedDict):
- """A dict representation of a `protos.Message`."""
-
- author: str
- content: str
- citation_metadata: Optional[citation_types.CitationMetadataDict]
-
-
-MessageOptions = Union[str, MessageDict, protos.Message]
-MESSAGE_OPTIONS = (str, dict, protos.Message)
-
-MessagesOptions = Union[
- MessageOptions,
- Iterable[MessageOptions],
-]
-MESSAGES_OPTIONS = (MESSAGE_OPTIONS, Iterable)
-
-
-class ExampleDict(TypedDict):
- """A dict representation of a `protos.Example`."""
-
- input: MessageOptions
- output: MessageOptions
-
-
-ExampleOptions = Union[
- Tuple[MessageOptions, MessageOptions],
- Iterable[MessageOptions],
- ExampleDict,
- protos.Example,
-]
-EXAMPLE_OPTIONS = (protos.Example, dict, Iterable)
-ExamplesOptions = Union[ExampleOptions, Iterable[ExampleOptions]]
-
-
-class MessagePromptDict(TypedDict, total=False):
- """A dict representation of a `protos.MessagePrompt`."""
-
- context: str
- examples: ExamplesOptions
- messages: MessagesOptions
-
-
-MessagePromptOptions = Union[
- str,
- protos.Message,
- Iterable[Union[str, protos.Message]],
- MessagePromptDict,
- protos.MessagePrompt,
-]
-MESSAGE_PROMPT_KEYS = {"context", "examples", "messages"}
-
-
-class ResponseDict(TypedDict):
- """A dict representation of a `protos.GenerateMessageResponse`."""
-
- messages: List[MessageDict]
- candidates: List[MessageDict]
-
-
-@string_utils.prettyprint
-@dataclasses.dataclass(init=False)
-class ChatResponse(abc.ABC):
- """A chat response from the model.
-
- * Use `response.last` (settable) for easy access to the text of the last response.
- (`messages[-1]['content']`)
- * Use `response.messages` to access the message history (including `.last`).
- * Use `response.candidates` to access all the responses generated by the model.
-
- Other attributes are just saved from the arguments to `genai.chat`, so you
- can easily continue a conversation:
-
- ```
- import google.generativeai as genai
-
- genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
-
- response = genai.chat(messages=["Hello."])
- print(response.last) # 'Hello! What can I help you with?'
- response.reply("Can you tell me a joke?")
- ```
-
- See `genai.chat` for more details.
-
- Attributes:
- candidates: A list of candidate responses from the model.
-
- The top candidate is appended to the `messages` field.
-
- This list will contain a *maximum* of `candidate_count` candidates.
- It may contain fewer (duplicates are dropped), it will contain at least one.
-
- Note: The `temperature` field affects the variability of the responses. Low
- temperatures will return few candidates. Setting `temperature=0` is deterministic,
- so it will only ever return one candidate.
- filters: This indicates which `types.SafetyCategory`(s) blocked a
- candidate from this response, the lowest `types.HarmProbability`
- that triggered a block, and the `types.HarmThreshold` setting for that category.
- This indicates the smallest change to the `types.SafetySettings` that would be
- necessary to unblock at least 1 response.
-
- The blocking is configured by the `types.SafetySettings` in the request (or the
- default `types.SafetySettings` of the API).
- messages: Contains all the `messages` that were passed when the model was called,
- plus the top `candidate` message.
- model: The model name.
- context: Text that should be provided to the model first, to ground the response.
- examples: Examples of what the model should generate.
- messages: A snapshot of the conversation history sorted chronologically.
- temperature: Controls the randomness of the output. Must be positive.
- candidate_count: The **maximum** number of generated response messages to return.
- top_k: The maximum number of tokens to consider when sampling.
- top_p: The maximum cumulative probability of tokens to consider when sampling.
-
- """
-
- model: str
- context: str
- examples: List[ExampleDict]
- messages: List[Optional[MessageDict]]
- temperature: Optional[float]
- candidate_count: Optional[int]
- candidates: List[MessageDict]
- filters: List[palm_safety_types.ContentFilterDict]
- top_p: Optional[float] = None
- top_k: Optional[float] = None
-
- @property
- @abc.abstractmethod
- def last(self) -> Optional[str]:
- """A settable property that provides simple access to the last response string
-
- A shortcut for `response.messages[0]['content']`.
- """
- pass
-
- def to_dict(self) -> Dict[str, Any]:
- result = {
- "model": self.model,
- "context": self.context,
- "examples": self.examples,
- "messages": self.messages,
- "temperature": self.temperature,
- "candidate_count": self.candidate_count,
- "top_p": self.top_p,
- "top_k": self.top_k,
- "candidates": self.candidates,
- }
- return result
-
- @abc.abstractmethod
- def reply(self, message: MessageOptions) -> "ChatResponse":
- "Add a message to the conversation, and get the model's response."
- pass
-
-
-class AuthorError(Exception):
- """Raised by the `chat` (or `reply`) functions when the author list can't be normalized."""
-
- pass
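`discuss_types` is deleted outright; the chat surface now lives on `GenerativeModel`. A minimal sketch of the replacement flow, mirroring `samples/chat.py` later in this diff:

```python
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()

# ChatResponse.last / ChatResponse.reply(...) map onto
# response.text / chat.send_message(...).
response = chat.send_message("Hello.")
print(response.text)
response = chat.send_message("Can you tell me a joke?")
print(response.text)

# chat.history replaces ChatResponse.messages.
for message in chat.history:
    print(message.role, ":", message.parts[0].text)
```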
diff --git a/google/generativeai/types/generation_types.py b/google/generativeai/types/generation_types.py
index d4bed8b86..5a2012e64 100644
--- a/google/generativeai/types/generation_types.py
+++ b/google/generativeai/types/generation_types.py
@@ -16,7 +16,6 @@
import collections
import contextlib
-import sys
from collections.abc import Iterable, AsyncIterable, Mapping
import dataclasses
import itertools
@@ -86,6 +85,8 @@ class GenerationConfigDict(TypedDict, total=False):
temperature: float
response_mime_type: str
response_schema: protos.Schema | Mapping[str, Any] # fmt: off
+ presence_penalty: float
+ frequency_penalty: float
@dataclasses.dataclass
@@ -145,17 +146,21 @@ class GenerationConfig:
Note: The default value varies by model, see the
`Model.top_k` attribute of the `Model` returned the
`genai.get_model` function.
-
response_mime_type:
Optional. Output response mimetype of the generated candidate text.
Supported mimetype:
`text/plain`: (default) Text output.
+                `text/x.enum`: for use with a string-enum in `response_schema`
`application/json`: JSON response in the candidates.
response_schema:
Optional. Specifies the format of the JSON requested if response_mime_type is
`application/json`.
+        presence_penalty:
+            Optional. Penalty applied if a token has already appeared in the
+            output, encouraging the model to introduce new tokens.
+        frequency_penalty:
+            Optional. Penalty that scales with how often a token has already
+            appeared in the output, discouraging repetition.
"""
candidate_count: int | None = None
@@ -165,7 +170,9 @@ class GenerationConfig:
top_p: float | None = None
top_k: int | None = None
response_mime_type: str | None = None
- response_schema: protos.Schema | Mapping[str, Any] | None = None
+ response_schema: protos.Schema | Mapping[str, Any] | type | None = None
+ presence_penalty: float | None = None
+ frequency_penalty: float | None = None
GenerationConfigType = Union[protos.GenerationConfig, GenerationConfigDict, GenerationConfig]
@@ -186,7 +193,8 @@ def _normalize_schema(generation_config):
if not str(response_schema).startswith("list["):
raise ValueError(
f"Invalid input: Could not understand the type of '{response_schema}'. "
- "Expected one of the following types: `int`, `float`, `str`, `bool`, `typing_extensions.TypedDict`, `dataclass`, or `list[...]`."
+ "Expected one of the following types: `int`, `float`, `str`, `bool`, `enum`, "
+ "`typing_extensions.TypedDict`, `dataclass` or `list[...]`."
)
response_schema = content_types._schema_for_class(response_schema)
@@ -306,6 +314,7 @@ def _join_code_execution_result(result_1, result_2):
def _join_candidates(candidates: Iterable[protos.Candidate]):
+ """Joins stream chunks of a single candidate."""
candidates = tuple(candidates)
index = candidates[0].index # These should all be the same.
@@ -321,6 +330,7 @@ def _join_candidates(candidates: Iterable[protos.Candidate]):
def _join_candidate_lists(candidate_lists: Iterable[list[protos.Candidate]]):
+ """Joins stream chunks where each chunk is a list of candidate chunks."""
    # Assuming that if a candidate ends, it is no longer returned in the list of
    # candidates, and that's why candidates have an index
candidates = collections.defaultdict(list)
@@ -344,10 +354,21 @@ def _join_prompt_feedbacks(
def _join_chunks(chunks: Iterable[protos.GenerateContentResponse]):
chunks = tuple(chunks)
+ if "usage_metadata" in chunks[-1]:
+ usage_metadata = chunks[-1].usage_metadata
+ else:
+ usage_metadata = None
+
+ if "model_version" in chunks[-1]:
+ model_version = chunks[-1].model_version
+ else:
+ model_version = None
+
return protos.GenerateContentResponse(
candidates=_join_candidate_lists(c.candidates for c in chunks),
prompt_feedback=_join_prompt_feedbacks(c.prompt_feedback for c in chunks),
- usage_metadata=chunks[-1].usage_metadata,
+ usage_metadata=usage_metadata,
+ model_version=model_version,
)
@@ -412,14 +433,22 @@ def parts(self):
"""
candidates = self.candidates
if not candidates:
- raise ValueError(
+ msg = (
"Invalid operation: The `response.parts` quick accessor requires a single candidate, "
- "but none were returned. Please check the `response.prompt_feedback` to determine if the prompt was blocked."
+            "but `response.candidates` is empty."
)
+ if self.prompt_feedback:
+ raise ValueError(
+                msg + "\nThis appears to be caused by a blocked prompt; "
+ f"see `response.prompt_feedback`: {self.prompt_feedback}"
+ )
+ else:
+ raise ValueError(msg)
+
if len(candidates) > 1:
raise ValueError(
- "Invalid operation: The `response.parts` quick accessor requires a single candidate. "
- "For multiple candidates, please use `result.candidates[index].text`."
+ "Invalid operation: The `response.parts` quick accessor retrieves the parts for a single candidate. "
+            "This response contains multiple candidates; please use `result.candidates[index].text`."
)
parts = candidates[0].content.parts
return parts
@@ -433,11 +462,52 @@ def text(self):
"""
parts = self.parts
if not parts:
- raise ValueError(
- "Invalid operation: The `response.text` quick accessor requires the response to contain a valid `Part`, "
- "but none were returned. Please check the `candidate.safety_ratings` to determine if the response was blocked."
+ candidate = self.candidates[0]
+
+ fr = candidate.finish_reason
+ FinishReason = protos.Candidate.FinishReason
+
+ msg = (
+ "Invalid operation: The `response.text` quick accessor requires the response to contain a valid "
+ "`Part`, but none were returned. The candidate's "
+ f"[finish_reason](https://ai.google.dev/api/generate-content#finishreason) is {fr}."
)
+ if fr is FinishReason.FINISH_REASON_UNSPECIFIED:
+ raise ValueError(msg)
+ elif fr is FinishReason.STOP:
+ raise ValueError(msg)
+ elif fr is FinishReason.MAX_TOKENS:
+ raise ValueError(msg)
+ elif fr is FinishReason.SAFETY:
+ raise ValueError(
+ msg + f" The candidate's safety_ratings are: {candidate.safety_ratings}.",
+ candidate.safety_ratings,
+ )
+ elif fr is FinishReason.RECITATION:
+ raise ValueError(
+ msg + " Meaning that the model was reciting from copyrighted material."
+ )
+ elif fr is FinishReason.LANGUAGE:
+ raise ValueError(msg + " Meaning the response was using an unsupported language.")
+ elif fr is FinishReason.OTHER:
+ raise ValueError(msg)
+ elif fr is FinishReason.BLOCKLIST:
+ raise ValueError(msg)
+ elif fr is FinishReason.PROHIBITED_CONTENT:
+ raise ValueError(msg)
+ elif fr is FinishReason.SPII:
+ raise ValueError(msg + " SPII - Sensitive Personally Identifiable Information.")
+ elif fr is FinishReason.MALFORMED_FUNCTION_CALL:
+ raise ValueError(
+                msg + " Meaning that the model generated a `FunctionCall` that was invalid. "
+ "Setting the "
+ "[Function calling mode](https://ai.google.dev/gemini-api/docs/function-calling#function_calling_mode) "
+ "to `ANY` can fix this because it enables constrained decoding."
+ )
+ else:
+ raise ValueError(msg)
+
texts = []
for part in parts:
if "text" in part:
@@ -462,7 +532,7 @@ def text(self):
texts.extend([f"```{outcome_result}", part.code_execution_result.output, "```"])
continue
- part_type = protos.Part.pb(part).whichOneof("data")
+ part_type = protos.Part.pb(part).WhichOneof("data")
raise ValueError(f"Could not convert `part.{part_type}` to text.")
return "\n".join(texts)
@@ -475,6 +545,10 @@ def prompt_feedback(self):
def usage_metadata(self):
return self._result.usage_metadata
+ @property
+ def model_version(self):
+ return self._result.model_version
+
def __str__(self) -> str:
if self._done:
_iterator = "None"
@@ -490,7 +564,8 @@ def __str__(self) -> str:
_result = _result.replace("\n", "\n ")
if self._error:
- _error = f",\nerror=<{self._error.__class__.__name__}> {self._error}"
+            _error = f",\nerror={repr(self._error)}"
else:
_error = ""
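A short sketch combining the `generation_types` additions above: the two new `GenerationConfig` penalty fields and the new `model_version` accessor (the penalty values are illustrative):

```python
import google.generativeai as genai

model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
    "Tell me a story about a magic backpack.",
    generation_config=genai.GenerationConfig(
        presence_penalty=0.5,   # flat penalty once a token has appeared
        frequency_penalty=0.5,  # penalty scaling with repetition count
    ),
)
print(response.text)
print(response.model_version)  # surfaced by the new property above
```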
diff --git a/google/generativeai/types/model_types.py b/google/generativeai/types/model_types.py
index 03922a64e..631e44d33 100644
--- a/google/generativeai/types/model_types.py
+++ b/google/generativeai/types/model_types.py
@@ -143,7 +143,9 @@ def idecode_time(parent: dict["str", Any], name: str):
def decode_tuned_model(tuned_model: protos.TunedModel | dict["str", Any]) -> TunedModel:
if isinstance(tuned_model, protos.TunedModel):
- tuned_model = type(tuned_model).to_dict(tuned_model) # pytype: disable=attribute-error
+ tuned_model = type(tuned_model).to_dict(
+ tuned_model, including_default_value_fields=False
+ ) # pytype: disable=attribute-error
tuned_model["state"] = to_tuned_model_state(tuned_model.pop("state", None))
base_model = tuned_model.pop("base_model", None)
@@ -195,6 +197,7 @@ class TunedModel:
create_time: datetime.datetime | None = None
update_time: datetime.datetime | None = None
tuning_task: TuningTask | None = None
+ reader_project_numbers: list[int] | None = None
@property
def permissions(self) -> permission_types.Permissions:
@@ -352,7 +355,10 @@ def make_model_name(name: AnyModelNameOptions):
if isinstance(name, (Model, protos.Model, TunedModel, protos.TunedModel)):
name = name.name # pytype: disable=attribute-error
elif isinstance(name, str):
- name = name
+        if "/" not in name:
+            name = "models/" + name
else:
raise TypeError(
"Invalid input type. Expected one of the following types: `str`, `Model`, or `TunedModel`."
diff --git a/google/generativeai/types/text_types.py b/google/generativeai/types/text_types.py
index 61804fcaa..e84a7e715 100644
--- a/google/generativeai/types/text_types.py
+++ b/google/generativeai/types/text_types.py
@@ -21,55 +21,12 @@
from typing_extensions import TypedDict
from google.generativeai import string_utils
-from google.generativeai.types import palm_safety_types
from google.generativeai.types import citation_types
-__all__ = ["Completion"]
-
-
-class TokenCount(TypedDict):
- token_count: int
-
-
class EmbeddingDict(TypedDict):
embedding: list[float]
class BatchEmbeddingDict(TypedDict):
embedding: list[list[float]]
-
-
-class TextCompletion(TypedDict, total=False):
- output: str
- safety_ratings: List[palm_safety_types.SafetyRatingDict | None]
- citation_metadata: citation_types.CitationMetadataDict | None
-
-
-@string_utils.prettyprint
-@dataclasses.dataclass(init=False)
-class Completion(abc.ABC):
- """The result returned by `generativeai.generate_text`.
-
- Use `GenerateTextResponse.candidates` to access all the completions generated by the model.
-
- Attributes:
- candidates: A list of candidate text completions generated by the model.
- result: The output of the first candidate,
- filters: Indicates the reasons why content may have been blocked.
- See `types.BlockedReason`.
- safety_feedback: Indicates which safety settings blocked content in this result.
- """
-
- candidates: List[TextCompletion]
- result: str | None
- filters: List[palm_safety_types.ContentFilterDict | None]
- safety_feedback: List[palm_safety_types.SafetyFeedbackDict | None]
-
- def to_dict(self) -> Dict[str, Any]:
- result = {
- "candidates": self.candidates,
- "filters": self.filters,
- "safety_feedback": self.safety_feedback,
- }
- return result
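With `text_types.Completion` gone, only the embedding dicts survive in this module; embeddings themselves are produced by the top-level helper. A minimal sketch, assuming the `embed_content` API that backs `samples/embed.py`, with an illustrative embedding model name:

```python
import google.generativeai as genai

result = genai.embed_content(
    model="models/text-embedding-004",
    content="The quick brown fox jumps over the lazy dog.",
)
# EmbeddingDict: {"embedding": [float, ...]}
print(len(result["embedding"]))
```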
diff --git a/google/generativeai/version.py b/google/generativeai/version.py
index f454e309f..6df9e6f74 100644
--- a/google/generativeai/version.py
+++ b/google/generativeai/version.py
@@ -14,4 +14,4 @@
# limitations under the License.
from __future__ import annotations
-__version__ = "0.7.2"
+__version__ = "0.8.5"
diff --git a/samples/README.md b/samples/README.md
new file mode 100644
index 000000000..ce2e9d243
--- /dev/null
+++ b/samples/README.md
@@ -0,0 +1,26 @@
+# Gemini API Python SDK sample code
+
+This directory contains sample code for key features of the SDK, organised by high-level feature.
+
+These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api).
+
+Each file is structured as a runnable test case, ensuring that samples are executable and functional. Each test demonstrates a single concept and contains region tags that demarcate the test scaffolding from the spotlight code. If you are contributing, code within region tags should follow sample code best practices: be clear, complete, and concise.
+
+## Contents
+
+| File | Description |
+|----------------------------------------------------------| ----------- |
+| [cache.py](./cache.py) | Context caching |
+| [chat.py](./chat.py) | Multi-turn chat conversations |
+| [code_execution.py](./code_execution.py) | Executing code |
+| [configure_model_parameters.py](./configure_model_parameters.py) | Setting model parameters |
+| [controlled_generation.py](./controlled_generation.py) | Generating content with output constraints (e.g. JSON mode) |
+| [count_tokens.py](./count_tokens.py) | Counting input and output tokens |
+| [embed.py](./embed.py) | Generating embeddings |
+| [files.py](./files.py) | Managing files with the File API |
+| [function_calling.py](./function_calling.py) | Using function calling |
+| [models.py](./models.py) | Listing models and model metadata |
+| [safety_settings.py](./safety_settings.py) | Setting and using safety controls |
+| [system_instruction.py](./system_instruction.py) | Setting system instructions |
+| [text_generation.py](./text_generation.py) | Generating text |
+| [tuned_models.py](./tuned_models.py) | Creating and managing tuned models |
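A minimal sketch of the test-case structure the README describes, with an illustrative region tag marking the spotlight code:

```python
from absl.testing import absltest


class UnitTests(absltest.TestCase):
    def test_text_generation(self):
        # [START text_gen_text_only_prompt]  (illustrative tag name)
        import google.generativeai as genai

        model = genai.GenerativeModel("gemini-1.5-flash")
        response = model.generate_content("Write a story about a magic backpack.")
        print(response.text)
        # [END text_gen_text_only_prompt]


if __name__ == "__main__":
    absltest.main()
```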
diff --git a/samples/cache.py b/samples/cache.py
index 82c4c1d7d..1d4dd6e47 100644
--- a/samples/cache.py
+++ b/samples/cache.py
@@ -14,7 +14,6 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
import pathlib
@@ -24,6 +23,8 @@
class UnitTests(absltest.TestCase):
def test_cache_create(self):
# [START cache_create]
+ import google.generativeai as genai
+
document = genai.upload_file(path=media / "a11.txt")
model_name = "gemini-1.5-flash-001"
cache = genai.caching.CachedContent.create(
@@ -41,6 +42,8 @@ def test_cache_create(self):
def test_cache_create_from_name(self):
# [START cache_create_from_name]
+ import google.generativeai as genai
+
document = genai.upload_file(path=media / "a11.txt")
model_name = "gemini-1.5-flash-001"
cache = genai.caching.CachedContent.create(
@@ -60,6 +63,8 @@ def test_cache_create_from_name(self):
def test_cache_create_from_chat(self):
# [START cache_create_from_chat]
+ import google.generativeai as genai
+
model_name = "gemini-1.5-flash-001"
system_instruction = "You are an expert analyzing transcripts."
@@ -92,6 +97,8 @@ def test_cache_create_from_chat(self):
def test_cache_delete(self):
# [START cache_delete]
+ import google.generativeai as genai
+
document = genai.upload_file(path=media / "a11.txt")
model_name = "gemini-1.5-flash-001"
cache = genai.caching.CachedContent.create(
@@ -104,6 +111,8 @@ def test_cache_delete(self):
def test_cache_get(self):
# [START cache_get]
+ import google.generativeai as genai
+
document = genai.upload_file(path=media / "a11.txt")
model_name = "gemini-1.5-flash-001"
cache = genai.caching.CachedContent.create(
@@ -117,6 +126,8 @@ def test_cache_get(self):
def test_cache_list(self):
# [START cache_list]
+ import google.generativeai as genai
+
document = genai.upload_file(path=media / "a11.txt")
model_name = "gemini-1.5-flash-001"
cache = genai.caching.CachedContent.create(
@@ -132,6 +143,8 @@ def test_cache_list(self):
def test_cache_update(self):
# [START cache_update]
+ import google.generativeai as genai
+
import datetime
document = genai.upload_file(path=media / "a11.txt")
diff --git a/samples/chat.py b/samples/chat.py
index 5089450d9..d4d77eeca 100644
--- a/samples/chat.py
+++ b/samples/chat.py
@@ -14,7 +14,6 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
import pathlib
media = pathlib.Path(__file__).parents[1] / "third_party"
@@ -23,6 +22,8 @@
class UnitTests(absltest.TestCase):
def test_chat(self):
# [START chat]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat(
history=[
@@ -38,6 +39,8 @@ def test_chat(self):
def test_chat_streaming(self):
# [START chat_streaming]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat(
history=[
@@ -59,6 +62,8 @@ def test_chat_streaming(self):
def test_chat_streaming_with_images(self):
# [START chat_streaming_with_images]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
diff --git a/samples/code_execution.py b/samples/code_execution.py
new file mode 100644
index 000000000..dfc2ce84e
--- /dev/null
+++ b/samples/code_execution.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from absl.testing import absltest
+
+
+class UnitTests(absltest.TestCase):
+ def test_code_execution_basic(self):
+ # [START code_execution_basic]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution")
+ response = model.generate_content(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ )
+ )
+
+ # Each `part` either contains `text`, `executable_code` or an `execution_result`
+ for part in response.candidates[0].content.parts:
+ print(part, "\n")
+
+ print("-" * 80)
+ # The `.text` accessor joins the parts into a markdown compatible text representation.
+ print("\n\n", response.text)
+ # [END code_execution_basic]
+
+ # [START code_execution_basic_return]
+ import google.generativeai as genai
+
+ # text: "I can help with that! To calculate the sum of the first 50 prime numbers, we\'ll need to first identify all the prime numbers up to the 50th prime number. \n\nHere is the code to find and sum the first 50 prime numbers:\n\n"
+ #
+ # executable_code {
+ # language: PYTHON
+ # code: "\ndef is_prime(n):\n \"\"\"\n Checks if a number is prime.\n \"\"\"\n if n <= 1:\n return False\n for i in range(2, int(n**0.5) + 1):\n if n % i == 0:\n return False\n return True\n\nprime_count = 0\nnumber = 2\nprimes = []\nwhile prime_count < 50:\n if is_prime(number):\n primes.append(number)\n prime_count += 1\n number += 1\n\nprint(f\'The sum of the first 50 prime numbers is: {sum(primes)}\')\n"
+ # }
+ #
+ # code_execution_result {
+ # outcome: OUTCOME_OK
+ # output: "The sum of the first 50 prime numbers is: 5117\n"
+ # }
+ #
+ # text: "I ran the code and it calculated that the sum of the first 50 prime numbers is 5117. \n"
+ #
+ #
+ # --------------------------------------------------------------------------------
+ # I can help with that! To calculate the sum of the first 50 prime numbers, we'll need to first identify all the prime numbers up to the 50th prime number.
+ #
+ # Here is the code to find and sum the first 50 prime numbers:
+ #
+ #
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # prime_count = 0
+ # number = 2
+ # primes = []
+ # while prime_count < 50:
+ # if is_prime(number):
+ # primes.append(number)
+ # prime_count += 1
+ # number += 1
+ #
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # I ran the code and it calculated that the sum of the first 50 prime numbers is 5117.
+ # [END code_execution_basic_return]
+
+ def test_code_execution_request_override(self):
+ # [START code_execution_request_override]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+ response = model.generate_content(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ ),
+ tools="code_execution",
+ )
+ print(response.text)
+ # [END code_execution_request_override]
+ # [START code_execution_request_override_return]
+ import google.generativeai as genai
+
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # primes = []
+ # num = 2
+ # count = 0
+ # while count < 50:
+ # if is_prime(num):
+ # primes.append(num)
+ # count += 1
+ # num += 1
+ #
+ # print(f'The first 50 prime numbers are: {primes}')
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
+ #
+ # The sum of the first 50 prime numbers is **5117**.
+ # [END code_execution_request_override_return]
+
+ def test_code_execution_chat(self):
+ # [START code_execution_chat]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash", tools="code_execution")
+ chat = model.start_chat()
+ response = chat.send_message('Can you print "Hello world!"?')
+ response = chat.send_message(
+ (
+ "What is the sum of the first 50 prime numbers? "
+ "Generate and run code for the calculation, and make sure you get all 50."
+ )
+ )
+ print(response.text)
+ # [END code_execution_chat]
+ # [START code_execution_chat_return]
+ import google.generativeai as genai
+
+ # ``` python
+ # def is_prime(n):
+ # """
+ # Checks if a number is prime.
+ # """
+ # if n <= 1:
+ # return False
+ # for i in range(2, int(n**0.5) + 1):
+ # if n % i == 0:
+ # return False
+ # return True
+ #
+ # primes = []
+ # num = 2
+ # count = 0
+ # while count < 50:
+ # if is_prime(num):
+ # primes.append(num)
+ # count += 1
+ # num += 1
+ #
+ # print(f'The first 50 prime numbers are: {primes}')
+ # print(f'The sum of the first 50 prime numbers is: {sum(primes)}')
+ #
+ # ```
+ # ```
+ # The first 50 prime numbers are: [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229]
+ # The sum of the first 50 prime numbers is: 5117
+ #
+ # ```
+ # The code generated a list of the first 50 prime numbers, then sums the list to find the answer.
+ #
+ # The sum of the first 50 prime numbers is **5117**.
+ # [END code_execution_chat_return]
+
+
+if __name__ == "__main__":
+ absltest.main()
diff --git a/samples/model_configuration.py b/samples/configure_model_parameters.py
similarity index 90%
rename from samples/model_configuration.py
rename to samples/configure_model_parameters.py
index 54aec9763..3fe28a5a8 100644
--- a/samples/model_configuration.py
+++ b/samples/configure_model_parameters.py
@@ -14,12 +14,12 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
-
class UnitTests(absltest.TestCase):
def test_configure_model(self):
- # [START configure_model]
+ # [START configure_model_parameters]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content(
"Tell me a story about a magic backpack.",
@@ -33,7 +33,7 @@ def test_configure_model(self):
)
print(response.text)
- # [END configure_model]
+ # [END configure_model_parameters]
if __name__ == "__main__":
diff --git a/samples/controlled_generation.py b/samples/controlled_generation.py
index e46b1a912..5caa9b7d4 100644
--- a/samples/controlled_generation.py
+++ b/samples/controlled_generation.py
@@ -11,23 +11,28 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
+import pathlib
-import google.generativeai as genai
+
+media = pathlib.Path(__file__).parents[1] / "third_party"
class UnitTests(absltest.TestCase):
def test_json_controlled_generation(self):
# [START json_controlled_generation]
+ import google.generativeai as genai
+
import typing_extensions as typing
class Recipe(typing.TypedDict):
recipe_name: str
+ ingredients: list[str]
model = genai.GenerativeModel("gemini-1.5-pro-latest")
result = model.generate_content(
"List a few popular cookie recipes.",
generation_config=genai.GenerationConfig(
- response_mime_type="application/json", response_schema=list([Recipe])
+ response_mime_type="application/json", response_schema=list[Recipe]
),
)
print(result)
@@ -35,15 +40,139 @@ class Recipe(typing.TypedDict):
def test_json_no_schema(self):
# [START json_no_schema]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-pro-latest")
- prompt = """List a few popular cookie recipes using this JSON schema:
+ prompt = """List a few popular cookie recipes in JSON format.
+
+ Use this JSON schema:
- Recipe = {'recipe_name': str}
+ Recipe = {'recipe_name': str, 'ingredients': list[str]}
Return: list[Recipe]"""
result = model.generate_content(prompt)
print(result)
# [END json_no_schema]
+ def test_json_enum(self):
+ # [START json_enum]
+ import google.generativeai as genai
+
+ import enum
+
+ class Choice(enum.Enum):
+ PERCUSSION = "Percussion"
+ STRING = "String"
+ WOODWIND = "Woodwind"
+ BRASS = "Brass"
+ KEYBOARD = "Keyboard"
+
+ model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
+ organ = genai.upload_file(media / "organ.jpg")
+ result = model.generate_content(
+ ["What kind of instrument is this:", organ],
+ generation_config=genai.GenerationConfig(
+ response_mime_type="application/json", response_schema=Choice
+ ),
+ )
+ print(result) # "Keyboard"
+ # [END json_enum]
+
+ def test_enum_in_json(self):
+ # [START enum_in_json]
+ import google.generativeai as genai
+
+ import enum
+ from typing_extensions import TypedDict
+
+ class Grade(enum.Enum):
+ A_PLUS = "a+"
+ A = "a"
+ B = "b"
+ C = "c"
+ D = "d"
+ F = "f"
+
+ class Recipe(TypedDict):
+ recipe_name: str
+ grade: Grade
+
+ model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
+ result = model.generate_content(
+ "List about 10 cookie recipes, grade them based on popularity",
+ generation_config=genai.GenerationConfig(
+ response_mime_type="application/json", response_schema=list[Recipe]
+ ),
+ )
+ print(result) # [{"grade": "a+", "recipe_name": "Chocolate Chip Cookies"}, ...]
+ # [END enum_in_json]
+
+ def test_json_enum_raw(self):
+ # [START json_enum_raw]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
+ organ = genai.upload_file(media / "organ.jpg")
+ result = model.generate_content(
+ ["What kind of instrument is this:", organ],
+ generation_config=genai.GenerationConfig(
+ response_mime_type="application/json",
+ response_schema={
+ "type": "STRING",
+ "enum": ["Percussion", "String", "Woodwind", "Brass", "Keyboard"],
+ },
+ ),
+ )
+ print(result) # "Keyboard"
+ # [END json_enum_raw]
+
+ def test_x_enum(self):
+ # [START x_enum]
+ import google.generativeai as genai
+
+ import enum
+
+ class Choice(enum.Enum):
+ PERCUSSION = "Percussion"
+ STRING = "String"
+ WOODWIND = "Woodwind"
+ BRASS = "Brass"
+ KEYBOARD = "Keyboard"
+
+ model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
+ organ = genai.upload_file(media / "organ.jpg")
+ result = model.generate_content(
+ ["What kind of instrument is this:", organ],
+ generation_config=genai.GenerationConfig(
+ response_mime_type="text/x.enum", response_schema=Choice
+ ),
+ )
+ print(result) # Keyboard
+ # [END x_enum]
+
+ def test_x_enum_raw(self):
+ # [START x_enum_raw]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-pro-latest")
+
+ organ = genai.upload_file(media / "organ.jpg")
+ result = model.generate_content(
+ ["What kind of instrument is this:", organ],
+ generation_config=genai.GenerationConfig(
+ response_mime_type="text/x.enum",
+ response_schema={
+ "type": "STRING",
+ "enum": ["Percussion", "String", "Woodwind", "Brass", "Keyboard"],
+ },
+ ),
+ )
+ print(result) # Keyboard
+ # [END x_enum_raw]
+
if __name__ == "__main__":
absltest.main()
diff --git a/samples/count_tokens.py b/samples/count_tokens.py
index ca42a1bb6..33431dde8 100644
--- a/samples/count_tokens.py
+++ b/samples/count_tokens.py
@@ -14,82 +14,257 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
import pathlib
media = pathlib.Path(__file__).parents[1] / "third_party"
class UnitTests(absltest.TestCase):
+ def test_tokens_context_window(self):
+ # [START tokens_context_window]
+ import google.generativeai as genai
+
+ model_info = genai.get_model("models/gemini-1.5-flash")
+
+ # Returns the "context window" for the model,
+ # which is the combined input and output token limits.
+ print(f"{model_info.input_token_limit=}")
+ print(f"{model_info.output_token_limit=}")
+ # ( input_token_limit=30720, output_token_limit=2048 )
+ # [END tokens_context_window]
+
def test_tokens_text_only(self):
# [START tokens_text_only]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- print(model.count_tokens("The quick brown fox jumps over the lazy dog."))
+
+ prompt = "The quick brown fox jumps over the lazy dog."
+
+ # Call `count_tokens` to get the input token count (`total_tokens`).
+ print("total_tokens: ", model.count_tokens(prompt))
+ # ( total_tokens: 10 )
+
+ response = model.generate_content(prompt)
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
+ print(response.usage_metadata)
+ # ( prompt_token_count: 11, candidates_token_count: 73, total_token_count: 84 )
# [END tokens_text_only]
def test_tokens_chat(self):
# [START tokens_chat]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
+
chat = model.start_chat(
history=[
- {"role": "user", "parts": "Hi, my name is Bob."},
+ {"role": "user", "parts": "Hi my name is Bob"},
{"role": "model", "parts": "Hi Bob!"},
]
)
- model.count_tokens(chat.history)
+ # Call `count_tokens` to get the input token count (`total_tokens`).
+ print(model.count_tokens(chat.history))
+ # ( total_tokens: 10 )
+
+ response = chat.send_message(
+ "In one sentence, explain how a computer works to a young child."
+ )
+
+ # On the response for `send_message`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
+ print(response.usage_metadata)
+ # ( prompt_token_count: 25, candidates_token_count: 21, total_token_count: 46 )
from google.generativeai.types.content_types import to_contents
- model.count_tokens(chat.history + to_contents("What is the meaning of life?"))
+ # You can call `count_tokens` on the combined history and content of the next turn.
+ print(model.count_tokens(chat.history + to_contents("What is the meaning of life?")))
+ # ( total_tokens: 56 )
# [END tokens_chat]
def test_tokens_multimodal_image_inline(self):
# [START tokens_multimodal_image_inline]
+ import google.generativeai as genai
+
+ import PIL.Image
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- import PIL
- organ = PIL.Image.open(media / "organ.jpg")
- print(model.count_tokens(["Tell me about this instrument", organ]))
+ prompt = "Tell me about this image"
+ your_image_file = PIL.Image.open(media / "organ.jpg")
+
+ # Call `count_tokens` to get the input token count
+ # of the combined text and file (`total_tokens`).
+ # An image's display or file size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the text and file separately.
+ print(model.count_tokens([prompt, your_image_file]))
+ # ( total_tokens: 263 )
+
+ response = model.generate_content([prompt, your_image_file])
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
+ print(response.usage_metadata)
+ # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
# [END tokens_multimodal_image_inline]
def test_tokens_multimodal_image_file_api(self):
# [START tokens_multimodal_image_file_api]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- organ_upload = genai.upload_file(media / "organ.jpg")
- print(model.count_tokens(["Tell me about this instrument", organ_upload]))
+
+ prompt = "Tell me about this image"
+ your_image_file = genai.upload_file(path=media / "organ.jpg")
+
+ # Call `count_tokens` to get the input token count
+ # of the combined text and file (`total_tokens`).
+ # An image's display or file size does not affect its token count.
+ # Optionally, you can call `count_tokens` for the text and file separately.
+ print(model.count_tokens([prompt, your_image_file]))
+ # ( total_tokens: 263 )
+
+ response = model.generate_content([prompt, your_image_file])
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
+ print(response.usage_metadata)
+ # ( prompt_token_count: 264, candidates_token_count: 80, total_token_count: 345 )
# [END tokens_multimodal_image_file_api]
def test_tokens_multimodal_video_audio_file_api(self):
# [START tokens_multimodal_video_audio_file_api]
+ import google.generativeai as genai
+
+ import time
+
model = genai.GenerativeModel("models/gemini-1.5-flash")
- audio_upload = genai.upload_file(media / "sample.mp3")
- print(model.count_tokens(audio_upload))
+
+ prompt = "Tell me about this video"
+ your_file = genai.upload_file(path=media / "Big_Buck_Bunny.mp4")
+
+ # Videos need to be processed before you can use them.
+ while your_file.state.name == "PROCESSING":
+ print("processing video...")
+ time.sleep(5)
+ your_file = genai.get_file(your_file.name)
+
+ # Call `count_tokens` to get the input token count
+ # of the combined text and video/audio file (`total_tokens`).
+ # A video or audio file is converted to tokens at a fixed rate of tokens per second.
+ # Optionally, you can call `count_tokens` for the text and file separately.
+ print(model.count_tokens([prompt, your_file]))
+ # ( total_tokens: 300 )
+
+ response = model.generate_content([prompt, your_file])
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the combined token count (`total_token_count`).
+ print(response.usage_metadata)
+ # ( prompt_token_count: 301, candidates_token_count: 60, total_token_count: 361 )
+
# [END tokens_multimodal_video_audio_file_api]
+ def test_tokens_multimodal_pdf_file_api(self):
+ # [START tokens_multimodal_pdf_file_api]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
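+
+ # Call `count_tokens` to get the input token count
+ # of the combined text and file (`total_tokens`).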
+ token_count = model.count_tokens(["Give me a summary of this document.", sample_pdf])
+ print(f"{token_count=}")
+
+ response = model.generate_content(["Give me a summary of this document.", sample_pdf])
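+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively).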
+ print(response.usage_metadata)
+ # [END tokens_multimodal_pdf_file_api]
+
def test_tokens_cached_content(self):
# [START tokens_cached_content]
- document = genai.upload_file(path=media / "a11.txt")
- model_name = "gemini-1.5-flash-001"
+ import google.generativeai as genai
+
+ your_file = genai.upload_file(path=media / "a11.txt")
+
cache = genai.caching.CachedContent.create(
- model=model_name,
- contents=[document],
+ model="models/gemini-1.5-flash-001",
+ # You can optionally set the system_instruction and tools here.
+ system_instruction=None,
+ tools=None,
+ contents=["Here is the Apollo 11 transcript:", your_file],
)
- print(genai.GenerativeModel().count_tokens(cache))
+
+ model = genai.GenerativeModel.from_cached_content(cache)
+
+ prompt = "Please give a short summary of this file."
+
+ # Call `count_tokens` to get the input token count of the prompt (`total_tokens`).
+ # The cached content's tokens are not included in this count; they are
+ # reported separately in the response's `usage_metadata` as `cached_content_token_count`.
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 9 )
+
+ response = model.generate_content(prompt)
+
+ # On the response for `generate_content`, use `usage_metadata`
+ # to get separate input and output token counts
+ # (`prompt_token_count` and `candidates_token_count`, respectively),
+ # as well as the cached content token count and the combined total token count.
+ print(response.usage_metadata)
+ # ( prompt_token_count: 323393, cached_content_token_count: 323383, candidates_token_count: 64)
+ # ( total_token_count: 323457 )
+
+ cache.delete()
# [END tokens_cached_content]
- cache.delete() # Clear
def test_tokens_system_instruction(self):
# [START tokens_system_instruction]
- document = genai.upload_file(path=media / "a11.txt")
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+
+ prompt = "The quick brown fox jumps over the lazy dog."
+
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 10 )
+
model = genai.GenerativeModel(
- "models/gemini-1.5-flash-001",
- system_instruction="You are an expert analyzing transcripts. Give a summary of this document.",
+ model_name="gemini-1.5-flash", system_instruction="You are a cat. Your name is Neko."
)
- print(model.count_tokens(document))
+
+ # The total token count includes everything sent in the `generate_content` request.
+ # When you use system instructions, the total token count increases.
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 21 )
# [END tokens_system_instruction]
def test_tokens_tools(self):
# [START tokens_tools]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name="gemini-1.5-flash")
+
+ prompt = "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
+
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 22 )
+
def add(a: float, b: float):
"""returns a + b."""
return a + b
@@ -110,11 +285,10 @@ def divide(a: float, b: float):
"models/gemini-1.5-flash-001", tools=[add, subtract, multiply, divide]
)
- print(
- model.count_tokens(
- "I have 57 cats, each owns 44 mittens, how many mittens is that in total?"
- )
- )
+ # The total token count includes everything sent in the `generate_content` request.
+ # When you use tools (like function calling), the total token count increases.
+ print(model.count_tokens(prompt))
+ # ( total_tokens: 206 )
# [END tokens_tools]
diff --git a/samples/embed.py b/samples/embed.py
index 2ee4997ed..a5897639b 100644
--- a/samples/embed.py
+++ b/samples/embed.py
@@ -15,12 +15,10 @@
from absl.testing import absltest
-import google.generativeai as genai
-
-
class UnitTests(absltest.TestCase):
def test_embed_content(self):
# [START embed_content]
+ import google.generativeai as genai
+
text = "Hello World!"
result = genai.embed_content(
@@ -31,6 +29,8 @@ def test_embed_content(self):
def batch_embed_contents(self):
# [START batch_embed_contents]
+ import google.generativeai as genai
+
texts = [
"What is the meaning of life?",
"How much wood would a woodchuck chuck?",
diff --git a/samples/files.py b/samples/files.py
index f5cbfdc0a..0011f4da2 100644
--- a/samples/files.py
+++ b/samples/files.py
@@ -15,7 +15,6 @@
from absl.testing import absltest
import google
-import google.generativeai as genai
import pathlib
media = pathlib.Path(__file__).parents[1] / "third_party"
@@ -24,6 +23,8 @@
class UnitTests(absltest.TestCase):
def test_files_create_text(self):
# [START files_create_text]
+ import google.generativeai as genai
+
myfile = genai.upload_file(media / "poem.txt")
print(f"{myfile=}")
@@ -36,6 +37,8 @@ def test_files_create_text(self):
def test_files_create_image(self):
# [START files_create_image]
+ import google.generativeai as genai
+
myfile = genai.upload_file(media / "Cajun_instruments.jpg")
print(f"{myfile=}")
@@ -48,6 +51,8 @@ def test_files_create_image(self):
def test_files_create_audio(self):
# [START files_create_audio]
+ import google.generativeai as genai
+
myfile = genai.upload_file(media / "sample.mp3")
print(f"{myfile=}")
@@ -58,6 +63,8 @@ def test_files_create_audio(self):
def test_files_create_video(self):
# [START files_create_video]
+ import google.generativeai as genai
+
import time
# Video clip (CC BY 3.0) from https://peach.blender.org/download/
@@ -75,8 +82,34 @@ def test_files_create_video(self):
print(f"{result.text=}")
# [END files_create_video]
+ def test_files_create_pdf(self):
+ # [START files_create_pdf]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ response = model.generate_content(["Give me a summary of this pdf file.", sample_pdf])
+ print(response.text)
+ # [END files_create_pdf]
+
+ def test_files_create_from_IO(self):
+ # [START files_create_io]
+ import google.generativeai as genai
+
+ # You can pass a file-like object, instead of a path.
+ # Useful for streaming.
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ fpath = media / "test.pdf"
+ with open(fpath, "rb") as f:
+ sample_pdf = genai.upload_file(f, mime_type="application/pdf")
+ response = model.generate_content(["Give me a summary of this pdf file.", sample_pdf])
+ print(response.text)
+ # [END files_create_io]
+
def test_files_list(self):
# [START files_list]
+ import google.generativeai as genai
+
print("My files:")
for f in genai.list_files():
print(" ", f.name)
@@ -84,6 +117,8 @@ def test_files_list(self):
def test_files_get(self):
# [START files_get]
+ import google.generativeai as genai
+
myfile = genai.upload_file(media / "poem.txt")
file_name = myfile.name
print(file_name) # "files/*"
@@ -94,6 +129,8 @@ def test_files_get(self):
def test_files_delete(self):
# [START files_delete]
+ import google.generativeai as genai
+
myfile = genai.upload_file(media / "poem.txt")
myfile.delete()
diff --git a/samples/function_calling.py b/samples/function_calling.py
index 8832408cf..829b97742 100644
--- a/samples/function_calling.py
+++ b/samples/function_calling.py
@@ -14,12 +14,12 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
-
class UnitTests(absltest.TestCase):
def test_function_calling(self):
# [START function_calling]
+ import google.generativeai as genai
+
def add(a: float, b: float):
"""returns a + b."""
return a + b
diff --git a/samples/models.py b/samples/models.py
index c1758792b..1eea4b31e 100644
--- a/samples/models.py
+++ b/samples/models.py
@@ -14,12 +14,12 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
-
class UnitTests(absltest.TestCase):
def test_models_list(self):
# [START models_list]
+ import google.generativeai as genai
+
print("List of models that support generateContent:\n")
for m in genai.list_models():
if "generateContent" in m.supported_generation_methods:
@@ -33,6 +33,8 @@ def test_models_list(self):
def test_models_get(self):
# [START models_get]
+ import google.generativeai as genai
+
model_info = genai.get_model("models/gemini-1.5-flash-latest")
print(model_info)
# [END models_get]
diff --git a/samples/rest/README.md b/samples/rest/README.md
new file mode 100644
index 000000000..7969097d2
--- /dev/null
+++ b/samples/rest/README.md
@@ -0,0 +1,26 @@
+# Gemini API REST sample code
+
+This directory contains sample code for key features of the API, organised by high-level feature.
+
+These samples are embedded in parts of the [documentation](https://ai.google.dev), most notably in the [API reference](https://ai.google.dev/api).
+
+Each file is structured as a runnable script, ensuring that samples are executable and functional. Each file contains region tags that are used to demarcate the spotlight code from the surrounding script. If you are contributing, code within region tags should follow sample code best practices: be clear, complete, and concise.
+
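+For example, each sample is wrapped in paired region tags, as in this sketch (the tag name here comes from [text_generation.sh](./text_generation.sh)):
+
+```shell
+# [START text_gen_text_only_prompt]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+  -H 'Content-Type: application/json' \
+  -X POST \
+  -d '{"contents": [{"parts": [{"text": "Write a story about a magic backpack."}]}]}'
+# [END text_gen_text_only_prompt]
+```
+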
+## Contents
+
+| File | Description |
+| ---- | ----------- |
+| [cache.sh](./cache.sh) | Context caching |
+| [chat.sh](./chat.sh) | Multi-turn chat conversations |
+| [code_execution.sh](./code_execution.sh) | Executing code |
+| [configure_model_parameters.sh](./configure_model_parameters.sh) | Setting model parameters |
+| [controlled_generation.sh](./controlled_generation.sh) | Generating content with output constraints (e.g. JSON mode) |
+| [count_tokens.sh](./count_tokens.sh) | Counting input and output tokens |
+| [embed.sh](./embed.sh) | Generating embeddings |
+| [files.sh](./files.sh) | Managing files with the File API |
+| [function_calling.sh](./function_calling.sh) | Using function calling |
+| [models.sh](./models.sh) | Listing models and model metadata |
+| [safety_settings.sh](./safety_settings.sh) | Setting and using safety controls |
+| [system_instruction.sh](./system_instruction.sh) | Setting system instructions |
+| [text_generation.sh](./text_generation.sh) | Generating text |
+| [tuned_models.sh](./tuned_models.sh) | Tuned models |
diff --git a/samples/rest/cache.sh b/samples/rest/cache.sh
new file mode 100644
index 000000000..8df687886
--- /dev/null
+++ b/samples/rest/cache.sh
@@ -0,0 +1,83 @@
+set -eu
+
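+# Pick the base64 flags this platform needs (FreeBSD/macOS vs. GNU coreutils).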
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+echo "[START cache_create]"
+# [START cache_create]
+wget https://storage.googleapis.com/generativeai-downloads/data/a11.txt
+echo '{
+ "model": "models/gemini-1.5-flash-001",
+ "contents":[
+ {
+ "parts":[
+ {
+ "inline_data": {
+ "mime_type":"text/plain",
+ "data": "'$(base64 $B64FLAGS a11.txt)'"
+ }
+ }
+ ],
+ "role": "user"
+ }
+ ],
+ "systemInstruction": {
+ "parts": [
+ {
+ "text": "You are an expert at analyzing transcripts."
+ }
+ ]
+ },
+ "ttl": "300s"
+}' > request.json
+
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d @request.json \
+ > cache.json
+
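+# Pull the cache resource name (e.g. "cachedContents/...") out of the response.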
+CACHE_NAME=$(grep '"name":' cache.json | cut -d '"' -f 4 | head -n 1)
+
+echo "[START cache_generate_content]"
+# [START cache_generate_content]
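+# Use the cached content in a request by passing its name in the "cachedContent" field.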
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash-001:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{
+ "contents": [
+ {
+ "parts":[{
+ "text": "Please summarize this transcript"
+ }],
+ "role": "user"
+ },
+ ],
+ "cachedContent": "'$CACHE_NAME'"
+ }'
+# [END cache_generate_content]
+# [END cache_create]
+rm a11.txt request.json
+
+echo "[START cache_list]"
+# [START cache_list]
+curl "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GEMINI_API_KEY"
+# [END cache_list]
+
+echo "[START cache_get]"
+# [START cache_get]
+curl "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY"
+# [END cache_get]
+
+echo "[START cache_update]"
+# [START cache_update]
+curl -X PATCH "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d '{"ttl": "600s"}'
+# [END cache_update]
+
+echo "[START cache_delete]"
+# [START cache_delete]
+curl -X DELETE "https://generativelanguage.googleapis.com/v1beta/$CACHE_NAME?key=$GEMINI_API_KEY"
+# [END cache_delete]
diff --git a/samples/rest/chat.sh b/samples/rest/chat.sh
new file mode 100644
index 000000000..0243e4152
--- /dev/null
+++ b/samples/rest/chat.sh
@@ -0,0 +1,93 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+echo "[START chat]"
+# [START chat]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role":"user",
+ "parts":[{
+ "text": "Hello"}]},
+ {"role": "model",
+ "parts":[{
+ "text": "Great to meet you. What would you like to know?"}]},
+ {"role":"user",
+ "parts":[{
+ "text": "I have two dogs in my house. How many paws are in my house?"}]},
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat]
+
+echo "[START chat_streaming]"
+# [START chat_streaming]
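+# Streaming uses the streamGenerateContent method and server-sent events (alt=sse).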
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role":"user",
+ "parts":[{
+ "text": "Hello"}]},
+ {"role": "model",
+ "parts":[{
+ "text": "Great to meet you. What would you like to know?"}]},
+ {"role":"user",
+ "parts":[{
+ "text": "I have two dogs in my house. How many paws are in my house?"}]},
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat_streaming]
+
+echo "[START chat_streaming_with_images]"
+# [START chat_streaming_with_images]
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {
+ "role": "user",
+ "parts": [
+ {
+ "text": "Hello, I am interested in learning about musical instruments. Can I show you one?"
+ }
+ ]
+ },
+ {
+ "role": "model",
+ "parts": [
+ {
+ "text": "Certainly."
+ },
+ ]
+ },
+ {
+ "role": "user",
+ "parts": [
+ {
+ "text": "Tell me about this instrument"
+ },
+ {
+ "inline_data": {
+ "mime_type": "image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }
+ ]
+ }' 2> /dev/null | grep "text"
+# [END chat_streaming_with_images]
diff --git a/samples/rest/code_execution.sh b/samples/rest/code_execution.sh
new file mode 100644
index 000000000..f134e728f
--- /dev/null
+++ b/samples/rest/code_execution.sh
@@ -0,0 +1,58 @@
+set -eu
+
+echo "[START code_execution_basic]"
+# [START code_execution_basic]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d ' {"tools": [{"code_execution": {}}],
+ "contents": {
+ "parts":
+ {
+ "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."
+ }
+ },
+}'
+# [END code_execution_basic]
+
+echo "[START code_execution_chat]"
+# [START code_execution_chat]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"tools": [{"code_execution": {}}],
+ "contents": [
+ {
+ "role": "user",
+ "parts": [{
+ "text": "Can you print \"Hello world!\"?"
+ }]
+ },{
+ "role": "model",
+ "parts": [
+ {
+ "text": ""
+ },
+ {
+ "executable_code": {
+ "language": "PYTHON",
+ "code": "\nprint(\"hello world!\")\n"
+ }
+ },
+ {
+ "code_execution_result": {
+ "outcome": "OUTCOME_OK",
+ "output": "hello world!\n"
+ }
+ },
+ {
+ "text": "I have printed \"hello world!\" using the provided python code block. \n"
+ }
+ ],
+ },{
+ "role": "user",
+ "parts": [{
+ "text": "What is the sum of the first 50 prime numbers? Generate and run code for the calculation, and make sure you get all 50."
+ }]
+ }
+ ]
+}'
+# [END code_execution_chat]
diff --git a/samples/rest/configure_model_parameters.sh b/samples/rest/configure_model_parameters.sh
new file mode 100644
index 000000000..2e6e31b35
--- /dev/null
+++ b/samples/rest/configure_model_parameters.sh
@@ -0,0 +1,24 @@
+set -eu
+
+echo "[START configure_model_parameters]"
+# [START configure_model_parameters]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Explain how AI works"}
+ ]
+ }],
+ "generationConfig": {
+ "stopSequences": [
+ "Title"
+ ],
+ "temperature": 1.0,
+ "maxOutputTokens": 800,
+ "topP": 0.8,
+ "topK": 10
+ }
+ }' 2> /dev/null | grep "text"
+# [END configure_model_parameters]
diff --git a/samples/rest/controlled_generation.sh b/samples/rest/controlled_generation.sh
new file mode 100644
index 000000000..352b435de
--- /dev/null
+++ b/samples/rest/controlled_generation.sh
@@ -0,0 +1,44 @@
+set -eu
+
+echo "json_controlled_generation"
+# [START json_controlled_generation]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{
+ "contents": [{
+ "parts":[
+ {"text": "List 5 popular cookie recipes"}
+ ]
+ }],
+ "generationConfig": {
+ "response_mime_type": "application/json",
+ "response_schema": {
+ "type": "ARRAY",
+ "items": {
+ "type": "OBJECT",
+ "properties": {
+ "recipe_name": {"type":"STRING"},
+ }
+ }
+ }
+ }
+}' 2> /dev/null | head
+# [END json_controlled_generation]
+
+echo "json_no_schema"
+# [START json_no_schema]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{
+ "contents": [{
+ "parts":[
+ {"text": "List a few popular cookie recipes using this JSON schema:
+
+ Recipe = {\"recipe_name\": str}
+ Return: list[Recipe]"
+ }
+ ]
+ }],
+ "generationConfig": { "response_mime_type": "application/json" }
+}' 2> /dev/null | head
+# [END json_no_schema]
diff --git a/samples/rest/count_tokens.sh b/samples/rest/count_tokens.sh
new file mode 100644
index 000000000..3c6be6719
--- /dev/null
+++ b/samples/rest/count_tokens.sh
@@ -0,0 +1,288 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+TEXT_PATH=${MEDIA_DIR}/poem.txt
+A11_PATH=${MEDIA_DIR}/a11.txt
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+echo "[START tokens_context_window]"
+# [START tokens_context_window]
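+# Fetch the model metadata; inputTokenLimit and outputTokenLimit give the context window.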
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro?key=$GEMINI_API_KEY > model.json
+jq .inputTokenLimit model.json
+jq .outputTokenLimit model.json
+# [END tokens_context_window]
+
+echo "[START tokens_text_only]"
+# [START tokens_text_only]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[{
+ "text": "The quick brown fox jumps over the lazy dog."
+ }],
+ }],
+ }'
+# [END tokens_text_only]
+
+echo "[START tokens_chat]"
+# [START tokens_chat]
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [
+ {"role": "user",
+ "parts": [{"text": "Hi, my name is Bob."}],
+ },
+ {"role": "model",
+ "parts":[{"text": "Hi Bob"}],
+ },
+ ],
+ }'
+# [END tokens_chat]
+
+echo "[START tokens_multimodal_image_inline]"
+# [START tokens_multimodal_image_inline]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:countTokens?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "'$(base64 $B64FLAGS $IMG_PATH)'"
+ }
+ }
+ ]
+ }]
+ }' 2> /dev/null
+# [END tokens_multimodal_image_inline]
+
+echo "[START tokens_multimodal_image_file_api]"
+# [START tokens_multimodal_image_file_api]
+MIME_TYPE=$(file -b --mime-type "${IMG_PATH}")
+NUM_BYTES=$(wc -c < "${IMG_PATH}")
+DISPLAY_NAME=IMAGE
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${IMG_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you tell me about the instruments in this photo?"},
+ {"file_data":
+ {"mime_type": "image/jpeg",
+ "file_uri": '$file_uri'}
+ }]
+ }]
+ }'
+# [END tokens_multimodal_image_file_api]
+
+echo "# [START tokens_multimodal_video_audio_file_api]"
+# [START tokens_multimodal_video_audio_file_api]
+
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+
+state=$(jq ".file.state" file_info.json)
+
+name=$(jq ".file.name" file_info.json)
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this video clip"},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }'
+# [END tokens_multimodal_video_audio_file_api]
+
+echo "[START tokens_cached_content]"
+# [START tokens_cached_content]
+echo '{
+ "model": "models/gemini-1.5-flash-001",
+ "contents":[
+ {
+ "parts":[
+ {
+ "inline_data": {
+ "mime_type":"text/plain",
+ "data": "'$(base64 $B64FLAGS $A11_PATH)'"
+ }
+ }
+ ],
+ "role": "user"
+ }
+ ],
+ "systemInstruction": {
+ "parts": [
+ {
+ "text": "You are an expert at analyzing transcripts."
+ }
+ ]
+ },
+ "ttl": "300s"
+}' > request.json
+
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/cachedContents?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d @request.json \
+ > cache.json
+
+jq .usageMetadata.totalTokenCount cache.json
+# [END tokens_cached_content]
+
+echo "[START tokens_system_instruction]"
+# [START tokens_system_instruction]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{ "system_instruction": {
+ "parts":
+ { "text": "You are a cat. Your name is Neko."}},
+ "contents": {
+ "parts": {
+ "text": "Hello there"}}}' > system_instructions.json
+
+jq .usageMetadata.totalTokenCount system_instructions.json
+# [END tokens_system_instruction]
+
+echo "[START tokens_tools]"
+# [START tokens_tools]
+cat > tools.json << EOF
+{
+ "function_declarations": [
+ {
+ "name": "enable_lights",
+ "description": "Turn on the lighting system.",
+ "parameters": { "type": "object" }
+ },
+ {
+ "name": "set_light_color",
+ "description": "Set the light color. Lights must be enabled for this to work.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "rgb_hex": {
+ "type": "string",
+ "description": "The light color as a 6-digit hex string, e.g. ff0000 for red."
+ }
+ },
+ "required": [
+ "rgb_hex"
+ ]
+ }
+ },
+ {
+ "name": "stop_lights",
+ "description": "Turn off the lighting system.",
+ "parameters": { "type": "object" }
+ }
+ ]
+}
+EOF
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-pro-latest:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d '
+ {
+ "system_instruction": {
+ "parts": {
+ "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks."
+ }
+ },
+  "tools": ['$(cat tools.json)'],
+
+ "tool_config": {
+ "function_calling_config": {"mode": "none"}
+ },
+
+ "contents": {
+ "role": "user",
+ "parts": {
+ "text": "What can you do?"
+ }
+ }
+ }
+' > tools_output.json
+
+jq .usageMetadata.totalTokenCount tools_output.json
+# [END tokens_tools]
diff --git a/samples/rest/embed.sh b/samples/rest/embed.sh
new file mode 100644
index 000000000..49f6ddfcb
--- /dev/null
+++ b/samples/rest/embed.sh
@@ -0,0 +1,32 @@
+set -eu
+
+echo "[START embed_content]"
+# [START embed_content]
+curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:embedContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "Hello world"}]}, }' 2> /dev/null | head
+# [END embed_content]
+
+echo "[START batch_embed_contents]"
+# [START batch_embed_contents]
+curl "https://generativelanguage.googleapis.com/v1beta/models/text-embedding-004:batchEmbedContents?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{"requests": [{
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "What is the meaning of life?"}]}, },
+ {
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "How much wood would a woodchuck chuck?"}]}, },
+ {
+ "model": "models/text-embedding-004",
+ "content": {
+ "parts":[{
+ "text": "How does the brain work?"}]}, }, ]}' 2> /dev/null | grep -C 5 values
+# [END batch_embed_contents]
diff --git a/samples/rest/files.sh b/samples/rest/files.sh
new file mode 100644
index 000000000..dd72db3b1
--- /dev/null
+++ b/samples/rest/files.sh
@@ -0,0 +1,300 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+TEXT_PATH=${MEDIA_DIR}/poem.txt
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+IMG_PATH_2=${MEDIA_DIR}/Cajun_instruments.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+PDF_PATH=${MEDIA_DIR}/test.pdf
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+echo "[START files_create_text]"
+# [START files_create_text]
+MIME_TYPE=$(file -b --mime-type "${TEXT_PATH}")
+NUM_BYTES=$(wc -c < "${TEXT_PATH}")
+DISPLAY_NAME=TEXT
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${TEXT_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you add a few more lines to this poem?"},
+ {"file_data":{"mime_type": "text/plain", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+
+echo "[START files_get]"
+# [START files_get]
+name=$(jq ".file.name" file_info.json)
+# Get the file of interest to check state
+curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+# Print some information about the file you got
+name=$(jq ".file.name" file_info.json)
+echo name=$name
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+# [END files_get]
+
+echo "[START files_delete]"
+# [START files_delete]
+curl --request "DELETE" https://generativelanguage.googleapis.com/v1beta/files/$name?key=$GEMINI_API_KEY
+# [END files_delete]
+
+# [END files_create_text]
+
+echo "[START files_create_image]"
+# [START files_create_image]
+MIME_TYPE=$(file -b --mime-type "${IMG_PATH_2}")
+NUM_BYTES=$(wc -c < "${IMG_PATH_2}")
+DISPLAY_NAME=IMAGE
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${IMG_PATH_2}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you tell me about the instruments in this photo?"},
+ {"file_data":
+ {"mime_type": "image/jpeg",
+ "file_uri": '$file_uri'}
+ }]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_image]
+
+echo "[START files_create_audio]"
+# [START files_create_audio]
+MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}")
+NUM_BYTES=$(wc -c < "${AUDIO_PATH}")
+DISPLAY_NAME=AUDIO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this audio clip"},
+        {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_audio]
+
+echo "[START files_create_video]"
+# [START files_create_video]
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+# Ensure the state of the video is 'ACTIVE'
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Describe this video clip"},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_video]
+
+echo "[START files_create_pdf]"
+# [START files_create_pdf]
+NUM_BYTES=$(wc -c < "${PDF_PATH}")
+DISPLAY_NAME=PDF
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: application/pdf" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GOOGLE_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you add a few more lines to this poem?"},
+ {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END files_create_pdf]
+
+echo "[START files_list]"
+# [START files_list]
+echo "My files: "
+
+curl "https://generativelanguage.googleapis.com/v1beta/files?key=$GEMINI_API_KEY"
+# [END files_list]
diff --git a/samples/rest/function_calling.sh b/samples/rest/function_calling.sh
new file mode 100644
index 000000000..a0e0fa28d
--- /dev/null
+++ b/samples/rest/function_calling.sh
@@ -0,0 +1,60 @@
+set -eu
+
+echo "[START function_calling]"
+# [START function_calling]
+
+cat > tools.json << EOF
+{
+ "function_declarations": [
+ {
+ "name": "enable_lights",
+ "description": "Turn on the lighting system."
+ },
+ {
+ "name": "set_light_color",
+ "description": "Set the light color. Lights must be enabled for this to work.",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "rgb_hex": {
+ "type": "string",
+ "description": "The light color as a 6-digit hex string, e.g. ff0000 for red."
+ }
+ },
+ "required": [
+ "rgb_hex"
+ ]
+ }
+ },
+ {
+ "name": "stop_lights",
+ "description": "Turn off the lighting system."
+ }
+ ]
+}
+EOF
+
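+# Inline the tool declarations from tools.json into the request body.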
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d @<(echo '
+ {
+ "system_instruction": {
+ "parts": {
+ "text": "You are a helpful lighting system bot. You can turn lights on and off, and you can set the color. Do not perform any other tasks."
+ }
+ },
+ "tools": ['$(cat tools.json)'],
+
+ "tool_config": {
+ "function_calling_config": {"mode": "auto"}
+ },
+
+ "contents": {
+ "role": "user",
+ "parts": {
+ "text": "Turn on the lights please."
+ }
+ }
+ }
+') 2> /dev/null | sed -n '/"content"/,/"finishReason"/p'
+# [END function_calling]
diff --git a/samples/rest/models.sh b/samples/rest/models.sh
index ebcd378ff..465d627d8 100644
--- a/samples/rest/models.sh
+++ b/samples/rest/models.sh
@@ -2,10 +2,10 @@ set -eu
echo "[START models_list]"
# [START models_list]
-curl https://generativelanguage.googleapis.com/v1beta/models?key=$GOOGLE_API_KEY
+curl https://generativelanguage.googleapis.com/v1beta/models?key=$GEMINI_API_KEY
# [END models_list]
echo "[START models_get]"
# [START models_get]
-curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro?key=$GOOGLE_API_KEY
+curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash?key=$GEMINI_API_KEY
# [END models_get]
diff --git a/samples/rest/safety_settings.sh b/samples/rest/safety_settings.sh
new file mode 100644
index 000000000..a087307db
--- /dev/null
+++ b/samples/rest/safety_settings.sh
@@ -0,0 +1,34 @@
+set -eu
+
+echo "[START safety_settings]"
+# [START safety_settings]
+ echo '{
+ "safetySettings": [
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"}
+ ],
+ "contents": [{
+ "parts":[{
+      "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write an ironic phrase about them.'"}]}]}' > request.json
+
+ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d @request.json 2> /dev/null
+# [END safety_settings]
+
+echo "[START safety_settings_multi]"
+# [START safety_settings_multi]
+ echo '{
+ "safetySettings": [
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_ONLY_HIGH"},
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
+ ],
+ "contents": [{
+ "parts":[{
+      "text": "'I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write an ironic phrase about them.'"}]}]}' > request.json
+
+ curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d @request.json 2> /dev/null
+# [END safety_settings_multi]
diff --git a/samples/rest/system_instruction.sh b/samples/rest/system_instruction.sh
new file mode 100644
index 000000000..44f77ea04
--- /dev/null
+++ b/samples/rest/system_instruction.sh
@@ -0,0 +1,13 @@
+set -eu
+
+echo "[START system_instruction]"
+# [START system_instruction]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+-H 'Content-Type: application/json' \
+-d '{ "system_instruction": {
+ "parts":
+ { "text": "You are a cat. Your name is Neko."}},
+ "contents": {
+ "parts": {
+ "text": "Hello there"}}}'
+# [END system_instruction]
diff --git a/samples/rest/text_generation.sh b/samples/rest/text_generation.sh
new file mode 100755
index 000000000..fc21e7d00
--- /dev/null
+++ b/samples/rest/text_generation.sh
@@ -0,0 +1,492 @@
+set -eu
+
+SCRIPT_DIR=$(dirname "$0")
+MEDIA_DIR=$(realpath ${SCRIPT_DIR}/../../third_party)
+
+IMG_PATH=${MEDIA_DIR}/organ.jpg
+IMG_PATH2=${MEDIA_DIR}/Cajun_instruments.jpg
+AUDIO_PATH=${MEDIA_DIR}/sample.mp3
+VIDEO_PATH=${MEDIA_DIR}/Big_Buck_Bunny.mp4
+PDF_PATH=${MEDIA_DIR}/test.pdf
+
+if [[ "$(base64 --version 2>&1)" = *"FreeBSD"* ]]; then
+ B64FLAGS="--input"
+else
+ B64FLAGS="-w0"
+fi
+
+BASE_URL="https://generativelanguage.googleapis.com"
+
+echo "[START text_gen_text_only_prompt]"
+# [START text_gen_text_only_prompt]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[{"text": "Write a story about a magic backpack."}]
+ }]
+ }' 2> /dev/null
+# [END text_gen_text_only_prompt]
+
+echo "[START text_gen_text_only_prompt_streaming]"
+# [START text_gen_text_only_prompt_streaming]
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=${GEMINI_API_KEY}" \
+ -H 'Content-Type: application/json' \
+ --no-buffer \
+ -d '{ "contents":[{"parts":[{"text": "Write a story about a magic backpack."}]}]}'
+# [END text_gen_text_only_prompt_streaming]
+
+echo "[START text_gen_multimodal_one_image_prompt]"
+# [START text_gen_multimodal_one_image_prompt]
+# Use a temporary file to hold the base64 encoded image data
+TEMP_B64=$(mktemp)
+trap 'rm -f "$TEMP_B64"' EXIT
+base64 $B64FLAGS $IMG_PATH > "$TEMP_B64"
+
+# Use a temporary file to hold the JSON payload
+TEMP_JSON=$(mktemp)
+# This trap replaces the earlier one, so clean up both temp files here.
+trap 'rm -f "$TEMP_B64" "$TEMP_JSON"' EXIT
+
+cat > "$TEMP_JSON" << EOF
+{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "$(cat "$TEMP_B64")"
+ }
+ }
+ ]
+ }]
+}
+EOF
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d "@$TEMP_JSON" 2> /dev/null
+# [END text_gen_multimodal_one_image_prompt]
+
+echo "[START text_gen_multimodal_one_image_prompt_streaming]"
+# [START text_gen_multimodal_one_image_prompt_streaming]
+cat > "$TEMP_JSON" << EOF
+{
+ "contents": [{
+ "parts":[
+ {"text": "Tell me about this instrument"},
+ {
+ "inline_data": {
+ "mime_type":"image/jpeg",
+ "data": "$(cat "$TEMP_B64")"
+ }
+ }
+ ]
+ }]
+}
+EOF
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d "@$TEMP_JSON" 2> /dev/null
+# [END text_gen_multimodal_one_image_prompt_streaming]
+
+echo "[START text_gen_multimodal_two_image_prompt]"
+# [START text_gen_multimodal_two_image_prompt]
+# Base64 encode both images into temporary files
+TEMP_B64_1=$(mktemp)
+TEMP_B64_2=$(mktemp)
+# Again, the trap is replaced, so keep cleaning up the earlier temp files too.
+trap 'rm -f "$TEMP_B64" "$TEMP_JSON" "$TEMP_B64_1" "$TEMP_B64_2"' EXIT
+base64 $B64FLAGS "$IMG_PATH" > "$TEMP_B64_1"
+base64 $B64FLAGS "$IMG_PATH2" > "$TEMP_B64_2"
+
+# Create the JSON payload using the base64 data from both images
+cat > "$TEMP_JSON" << EOF
+{
+ "contents": [{
+ "parts":[
+ {
+ "inline_data": {
+ "mime_type": "image/jpeg",
+ "data": "$(cat "$TEMP_B64_1")"
+ }
+ },
+ {
+ "inline_data": {
+ "mime_type": "image/jpeg",
+ "data": "$(cat "$TEMP_B64_2")"
+ }
+ },
+ {
+ "text": "Generate a list of all the objects contained in both images."
+ }
+ ]
+ }]
+}
+EOF
+
+# Make the API request using the JSON file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d "@$TEMP_JSON" 2> /dev/null > response.json
+
+# Display the response
+cat response.json
+# [END text_gen_multimodal_two_image_prompt]
+
+echo "[START text_gen_multimodal_one_image_bounding_box_prompt]"
+# [START text_gen_multimodal_one_image_bounding_box_prompt]
+# Re-use TEMP_B64_2 (from the previous two-image prompt) and TEMP_JSON
+
+# Create the JSON payload for bounding box detection
+cat > "$TEMP_JSON" << EOF
+{
+ "contents": [{
+ "parts":[
+ {
+ "inline_data": {
+ "mime_type": "image/jpeg",
+ "data": "$(cat "$TEMP_B64_2")"
+ }
+ },
+ {
+ "text": "Generate bounding boxes for each of the objects in this image in [y_min, x_min, y_max, x_max] format."
+ }
+ ]
+ }]
+}
+EOF
+
+# Make the API request using the JSON file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d "@$TEMP_JSON" 2> /dev/null > response.json
+
+cat response.json
+# [END text_gen_multimodal_one_image_bounding_box_prompt]
+
+echo "[START text_gen_multimodal_audio]"
+# [START text_gen_multimodal_audio]
+# Use the File API to upload audio data for the API request.
+MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}")
+NUM_BYTES=$(wc -c < "${AUDIO_PATH}")
+DISPLAY_NAME=AUDIO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END text_gen_multimodal_audio]
+
+echo "[START text_gen_multimodal_audio_streaming]"
+# [START text_gen_multimodal_audio_streaming]
+# Use the File API to upload audio data for the API request.
+MIME_TYPE=$(file -b --mime-type "${AUDIO_PATH}")
+NUM_BYTES=$(wc -c < "${AUDIO_PATH}")
+DISPLAY_NAME=AUDIO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${AUDIO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "audio/mpeg", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+# [END text_gen_multimodal_audio_streaming]
+
+echo "[START text_gen_multimodal_video_prompt]"
+# [START text_gen_multimodal_video_prompt]
+# Use the File API to upload video data for the API request.
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D "${tmp_header_file}" \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+name=$(jq ".file.name" file_info.json)
+echo name=$name
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Transcribe the audio from this video, giving timestamps for salient events in the video. Also provide visual descriptions."},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END text_gen_multimodal_video_prompt]
+
+echo "[START text_gen_multimodal_video_prompt_streaming]"
+# [START text_gen_multimodal_video_prompt_streaming]
+# Use the File API to upload video data for the API request.
+MIME_TYPE=$(file -b --mime-type "${VIDEO_PATH}")
+NUM_BYTES=$(wc -c < "${VIDEO_PATH}")
+DISPLAY_NAME=VIDEO
+
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${VIDEO_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+state=$(jq ".file.state" file_info.json)
+echo state=$state
+
+while [[ "($state)" = *"PROCESSING"* ]];
+do
+ echo "Processing video..."
+ sleep 5
+ # Get the file of interest to check state
+ curl https://generativelanguage.googleapis.com/v1beta/files/$name > file_info.json
+ state=$(jq ".file.state" file_info.json)
+done
+
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Please describe this file."},
+ {"file_data":{"mime_type": "video/mp4", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+# [END text_gen_multimodal_video_prompt_streaming]
+
+echo "[START text_gen_multimodal_pdf]"
+# [START text_gen_multimodal_pdf]
+MIME_TYPE=$(file -b --mime-type "${PDF_PATH}")
+NUM_BYTES=$(wc -c < "${PDF_PATH}")
+DISPLAY_NAME=PDF
+
+echo $MIME_TYPE
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+ -D upload-header.tmp \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you add a few more lines to this poem?"},
+ {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+
+jq ".candidates[].content.parts[].text" response.json
+# [END text_gen_multimodal_pdf]
+
+echo "[START text_gen_multimodal_pdf_streaming]"
+# [START text_gen_multimodal_pdf_streaming]
+MIME_TYPE=$(file -b --mime-type "${PDF_PATH}")
+NUM_BYTES=$(wc -c < "${PDF_PATH}")
+DISPLAY_NAME=PDF
+
+
+echo $MIME_TYPE
+tmp_header_file=upload-header.tmp
+
+# Initial resumable request defining metadata.
+# The upload URL is in the response headers; dump them to a file.
+curl "${BASE_URL}/upload/v1beta/files?key=${GEMINI_API_KEY}" \
+  -D "${tmp_header_file}" \
+ -H "X-Goog-Upload-Protocol: resumable" \
+ -H "X-Goog-Upload-Command: start" \
+ -H "X-Goog-Upload-Header-Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Header-Content-Type: ${MIME_TYPE}" \
+ -H "Content-Type: application/json" \
+ -d "{'file': {'display_name': '${DISPLAY_NAME}'}}" 2> /dev/null
+
+upload_url=$(grep -i "x-goog-upload-url: " "${tmp_header_file}" | cut -d" " -f2 | tr -d "\r")
+rm "${tmp_header_file}"
+
+# Upload the actual bytes.
+curl "${upload_url}" \
+ -H "Content-Length: ${NUM_BYTES}" \
+ -H "X-Goog-Upload-Offset: 0" \
+ -H "X-Goog-Upload-Command: upload, finalize" \
+ --data-binary "@${PDF_PATH}" 2> /dev/null > file_info.json
+
+file_uri=$(jq ".file.uri" file_info.json)
+echo file_uri=$file_uri
+
+# Now generate content using that file
+curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse&key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -X POST \
+ -d '{
+ "contents": [{
+ "parts":[
+ {"text": "Can you add a few more lines to this poem?"},
+ {"file_data":{"mime_type": "application/pdf", "file_uri": '$file_uri'}}]
+ }]
+ }' 2> /dev/null > response.json
+
+cat response.json
+echo
+# [END text_gen_multimodal_pdf_streaming]
diff --git a/samples/rest/tuned_models.sh b/samples/rest/tuned_models.sh
new file mode 100644
index 000000000..0e32f97a0
--- /dev/null
+++ b/samples/rest/tuned_models.sh
@@ -0,0 +1,143 @@
+set -eu
+
+echo "[START tuned_models_create]"
+# [START tuned_models_create]
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/tunedModels?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d '
+ {
+ "display_name": "number generator model",
+ "base_model": "models/gemini-1.5-flash-001-tuning",
+ "tuning_task": {
+ "hyperparameters": {
+        "batch_size": 2,
+        "learning_rate": 0.001,
+        "epoch_count": 5
+      },
+ "training_data": {
+ "examples": {
+          "examples": [
+            {"text_input": "1", "output": "2"},
+            {"text_input": "3", "output": "4"},
+            {"text_input": "-3", "output": "-2"},
+            {"text_input": "twenty two", "output": "twenty three"},
+            {"text_input": "two hundred", "output": "two hundred one"},
+            {"text_input": "ninety nine", "output": "one hundred"},
+            {"text_input": "8", "output": "9"},
+            {"text_input": "-98", "output": "-97"},
+            {"text_input": "1,000", "output": "1,001"},
+            {"text_input": "10,100,000", "output": "10,100,001"},
+            {"text_input": "thirteen", "output": "fourteen"},
+            {"text_input": "eighty", "output": "eighty one"},
+            {"text_input": "one", "output": "two"},
+            {"text_input": "three", "output": "four"},
+            {"text_input": "seven", "output": "eight"}
+          ]
+ }
+ }
+ }
+ }' | tee tunemodel.json
+
+# Check the operation for status updates during training.
+# Note: operations can only be queried on the v1/ endpoint.
+operation=$(jq -r ".name" tunemodel.json)
+tuning_done=false
+
+while [[ "$tuning_done" != "true" ]];
+do
+ sleep 5
+ curl -X GET "https://generativelanguage.googleapis.com/v1/${operation}?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ 2> /dev/null > tuning_operation.json
+
+ complete=$(jq .metadata.completedPercent < tuning_operation.json)
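+  # Move the cursor up one line and clear it so the progress percentage
+  # rewrites in place instead of scrolling.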
+ tput cuu1
+ tput el
+ echo "Tuning...${complete}%"
+ tuning_done=$(jq .done < tuning_operation.json)
+done
+
+# Or get the TunedModel and check its state. The model is ready to use if the state is ACTIVE.
+modelname=$(jq -r ".metadata.tunedModel" tunemodel.json)
+curl -X GET "https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' > tuned_model.json
+
+jq ".state" < tuned_model.json
+# [END tuned_models_create]
+
+
+echo "[START tuned_models_generate_content]"
+# [START tuned_models_generate_content]
+curl -X POST "https://generativelanguage.googleapis.com/v1beta/${modelname}:generateContent?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "contents": [{
+ "parts": [{
+ "text": "LXIII"
+ }]
+ }]
+ }' 2> /dev/null
+# [END tuned_models_generate_content]
+
+echo "[START tuned_models_get]"
+# [START tuned_models_get]
+curl -X GET "https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json' | grep state
+# [END tuned_models_get]
+
+echo "[START tuned_models_list]"
+# [START tuned_models_list]
+# Sending a page_size is optional
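+# Listing tuned models is a user-scoped request, so this call authenticates
+# with OAuth (access_token, project_id) rather than an API key.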
+curl -X GET "https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5" \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer ${access_token}" \
+ -H "x-goog-user-project: ${project_id}" > tuned_models.json
+
+jq .tunedModels[].name < tuned_models.json
+
+# Send the nextPageToken to get the next page.
+page_token=$(jq -r .nextPageToken < tuned_models.json)
+
+if [[ "$page_token" != "null" ]]; then
+curl -X GET "https://generativelanguage.googleapis.com/v1beta/tunedModels?page_size=5&page_token=${page_token}" \
+    -H "Content-Type: application/json" \
+    -H "Authorization: Bearer ${access_token}" \
+    -H "x-goog-user-project: ${project_id}" > tuned_models2.json
+jq .tunedModels[].name < tuned_models2.json
+fi
+# [END tuned_models_list]
+
+echo "[START tuned_models_delete]"
+# [START tuned_models_delete]
+curl -X DELETE "https://generativelanguage.googleapis.com/v1beta/${modelname}?key=$GEMINI_API_KEY" \
+ -H 'Content-Type: application/json'
+# [END tuned_models_delete]
diff --git a/samples/safety_settings.py b/samples/safety_settings.py
index 84c70d980..49b099dad 100644
--- a/samples/safety_settings.py
+++ b/samples/safety_settings.py
@@ -14,12 +14,12 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
-
class UnitTests(absltest.TestCase):
def test_safety_settings(self):
# [START safety_settings]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
unsafe_prompt = "I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them."
response = model.generate_content(
@@ -33,6 +33,8 @@ def test_safety_settings(self):
def test_safety_settings_multi(self):
# [START safety_settings_multi]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
unsafe_prompt = "I support Martians Soccer Club and I think Jupiterians Football Club sucks! Write a ironic phrase about them."
response = model.generate_content(
diff --git a/samples/system_instruction.py b/samples/system_instruction.py
index a87932c95..bf486ec45 100644
--- a/samples/system_instruction.py
+++ b/samples/system_instruction.py
@@ -14,12 +14,12 @@
# limitations under the License.
from absl.testing import absltest
-import google.generativeai as genai
-
class UnitTests(absltest.TestCase):
def test_system_instructions(self):
# [START system_instruction]
+ import google.generativeai as genai
+
model = genai.GenerativeModel(
"models/gemini-1.5-flash",
system_instruction="You are a cat. Your name is Neko.",
diff --git a/samples/text_generation.py b/samples/text_generation.py
index a5e800c75..ab57149b2 100644
--- a/samples/text_generation.py
+++ b/samples/text_generation.py
@@ -12,10 +12,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import PIL.Image
from absl.testing import absltest
-import google.generativeai as genai
import pathlib
media = pathlib.Path(__file__).parents[1] / "third_party"
@@ -24,6 +22,8 @@
class UnitTests(absltest.TestCase):
def test_text_gen_text_only_prompt(self):
# [START text_gen_text_only_prompt]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Write a story about a magic backpack.")
print(response.text)
@@ -31,6 +31,8 @@ def test_text_gen_text_only_prompt(self):
def test_text_gen_text_only_prompt_streaming(self):
# [START text_gen_text_only_prompt_streaming]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Write a story about a magic backpack.", stream=True)
for chunk in response:
@@ -40,7 +42,9 @@ def test_text_gen_text_only_prompt_streaming(self):
def test_text_gen_multimodal_one_image_prompt(self):
# [START text_gen_multimodal_one_image_prompt]
- import PIL
+ import google.generativeai as genai
+
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -50,7 +54,9 @@ def test_text_gen_multimodal_one_image_prompt(self):
def test_text_gen_multimodal_one_image_prompt_streaming(self):
# [START text_gen_multimodal_one_image_prompt_streaming]
- import PIL
+ import google.generativeai as genai
+
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -62,7 +68,9 @@ def test_text_gen_multimodal_one_image_prompt_streaming(self):
def test_text_gen_multimodal_multi_image_prompt(self):
# [START text_gen_multimodal_multi_image_prompt]
- import PIL
+ import google.generativeai as genai
+
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -75,7 +83,9 @@ def test_text_gen_multimodal_multi_image_prompt(self):
def test_text_gen_multimodal_multi_image_prompt_streaming(self):
# [START text_gen_multimodal_multi_image_prompt_streaming]
- import PIL
+ import google.generativeai as genai
+
+ import PIL.Image
model = genai.GenerativeModel("gemini-1.5-flash")
organ = PIL.Image.open(media / "organ.jpg")
@@ -91,14 +101,31 @@ def test_text_gen_multimodal_multi_image_prompt_streaming(self):
def test_text_gen_multimodal_audio(self):
# [START text_gen_multimodal_audio]
+ import google.generativeai as genai
+
model = genai.GenerativeModel("gemini-1.5-flash")
sample_audio = genai.upload_file(media / "sample.mp3")
response = model.generate_content(["Give me a summary of this audio file.", sample_audio])
print(response.text)
# [END text_gen_multimodal_audio]
+ def test_text_gen_multimodal_audio_streaming(self):
+ # [START text_gen_multimodal_audio_streaming]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_audio = genai.upload_file(media / "sample.mp3")
+        response = model.generate_content(["Give me a summary of this audio file.", sample_audio], stream=True)
+
+ for chunk in response:
+ print(chunk.text)
+ print("_" * 80)
+ # [END text_gen_multimodal_audio_streaming]
+
def test_text_gen_multimodal_video_prompt(self):
# [START text_gen_multimodal_video_prompt]
+ import google.generativeai as genai
+
import time
# Video clip (CC BY 3.0) from https://peach.blender.org/download/
@@ -112,20 +139,57 @@ def test_text_gen_multimodal_video_prompt(self):
myfile = genai.get_file(myfile.name)
model = genai.GenerativeModel("gemini-1.5-flash")
- result = model.generate_content([myfile, "Describe this video clip"])
- print(f"{result.text=}")
+ response = model.generate_content([myfile, "Describe this video clip"])
+ print(f"{response.text=}")
# [END text_gen_multimodal_video_prompt]
def test_text_gen_multimodal_video_prompt_streaming(self):
# [START text_gen_multimodal_video_prompt_streaming]
+ import google.generativeai as genai
+
+ import time
+
+ # Video clip (CC BY 3.0) from https://peach.blender.org/download/
+ myfile = genai.upload_file(media / "Big_Buck_Bunny.mp4")
+ print(f"{myfile=}")
+
+ # Videos need to be processed before you can use them.
+ while myfile.state.name == "PROCESSING":
+ print("processing video...")
+ time.sleep(5)
+ myfile = genai.get_file(myfile.name)
+
model = genai.GenerativeModel("gemini-1.5-flash")
- video = genai.upload_file(media / "Big_Buck_Bunny.mp4")
- response = model.generate_content(["Describe this video clip.", video], stream=True)
+
+        response = model.generate_content([myfile, "Describe this video clip"], stream=True)
for chunk in response:
print(chunk.text)
print("_" * 80)
# [END text_gen_multimodal_video_prompt_streaming]
+ def test_text_gen_multimodal_pdf(self):
+ # [START text_gen_multimodal_pdf]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+ response = model.generate_content(["Give me a summary of this document:", sample_pdf])
+ print(f"{response.text=}")
+ # [END text_gen_multimodal_pdf]
+
+ def test_text_gen_multimodal_pdf_streaming(self):
+ # [START text_gen_multimodal_pdf_streaming]
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel("gemini-1.5-flash")
+ sample_pdf = genai.upload_file(media / "test.pdf")
+        response = model.generate_content(["Give me a summary of this document:", sample_pdf], stream=True)
+
+ for chunk in response:
+ print(chunk.text)
+ print("_" * 80)
+ # [END text_gen_multimodal_pdf_streaming]
+
if __name__ == "__main__":
absltest.main()
diff --git a/samples/tuned_models.py b/samples/tuned_models.py
index 29246347d..df12903ac 100644
--- a/samples/tuned_models.py
+++ b/samples/tuned_models.py
@@ -15,7 +15,6 @@
from absl.testing import absltest
import google
-import google.generativeai as genai
import pathlib
@@ -23,11 +22,15 @@
class UnitTests(absltest.TestCase):
- def test_tuned_models_create(self):
+ @classmethod
+ def setUpClass(cls):
+ # Code to run once before all tests in the class
# [START tuned_models_create]
+ import google.generativeai as genai
+
import time
- base_model = "models/gemini-1.0-pro-001"
+ base_model = "models/gemini-1.5-flash-001-tuning"
training_data = [
{"text_input": "1", "output": "2"},
# ... more examples ...
@@ -52,7 +55,7 @@ def test_tuned_models_create(self):
# You can use a tuned model here too. Set `source_model="tunedModels/..."`
display_name="increment",
source_model=base_model,
- epoch_count=20,
+ epoch_count=5,
batch_size=4,
learning_rate=0.001,
training_data=training_data,
@@ -61,40 +64,49 @@ def test_tuned_models_create(self):
for status in operation.wait_bar():
time.sleep(10)
- result = operation.result()
- print(result)
+ tuned_model = operation.result()
+ print(tuned_model)
# # You can plot the loss curve with:
# snapshots = pd.DataFrame(result.tuning_task.snapshots)
# sns.lineplot(data=snapshots, x='epoch', y='mean_loss')
- model = genai.GenerativeModel(model_name=result.name)
+ model = genai.GenerativeModel(model_name=tuned_model.name)
result = model.generate_content("III")
print(result.text) # IV
# [END tuned_models_create]
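+        # Store the tuned model name on the class so the remaining tests reuse
+        # it instead of re-running the slow tuning job for every test.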
+        cls.tuned_model_name = tuned_model.name
+
def test_tuned_models_generate_content(self):
# [START tuned_models_generate_content]
- model = genai.GenerativeModel(model_name="tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model = genai.GenerativeModel(model_name=self.tuned_model_name)
result = model.generate_content("III")
print(result.text) # "IV"
- # [END tuned_models_create]
+ # [END tuned_models_generate_content]
def test_tuned_models_get(self):
# [START tuned_models_get]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
print(model_info)
# [END tuned_models_get]
def test_tuned_models_list(self):
# [START tuned_models_list]
+ import google.generativeai as genai
+
for model_info in genai.list_tuned_models():
print(model_info.name)
# [END tuned_models_list]
def test_tuned_models_delete(self):
import time
+ import google.generativeai as genai
- base_model = "models/gemini-1.0-pro-001"
+ base_model = "models/gemini-1.5-flash-001-tuning"
training_data = samples / "increment_tuning_data.json"
try:
operation = genai.create_tuned_model(
@@ -102,7 +114,7 @@ def test_tuned_models_delete(self):
# You can use a tuned model here too. Set `source_model="tunedModels/..."`
display_name="increment",
source_model=base_model,
- epoch_count=20,
+ epoch_count=5,
batch_size=4,
learning_rate=0.001,
training_data=training_data,
@@ -114,6 +126,8 @@ def test_tuned_models_delete(self):
time.sleep(10)
# [START tuned_models_delete]
+ import google.generativeai as genai
+
model_name = "tunedModels/delete-this-model"
model_info = genai.get_model(model_name)
print(model_info)
@@ -124,7 +138,9 @@ def test_tuned_models_delete(self):
def test_tuned_models_permissions_create(self):
# [START tuned_models_permissions_create]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
# [START_EXCLUDE]
for p in model_info.permissions.list():
if p.role.name != "OWNER":
@@ -148,7 +164,9 @@ def test_tuned_models_permissions_create(self):
def test_tuned_models_permissions_list(self):
# [START tuned_models_permissions_list]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
# [START_EXCLUDE]
for p in model_info.permissions.list():
@@ -175,7 +193,9 @@ def test_tuned_models_permissions_list(self):
def test_tuned_models_permissions_get(self):
# [START tuned_models_permissions_get]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
# [START_EXCLUDE]
for p in model_info.permissions.list():
@@ -197,7 +217,9 @@ def test_tuned_models_permissions_get(self):
def test_tuned_models_permissions_update(self):
# [START tuned_models_permissions_update]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
# [START_EXCLUDE]
for p in model_info.permissions.list():
@@ -216,7 +238,9 @@ def test_tuned_models_permissions_update(self):
def test_tuned_models_permission_delete(self):
# [START tuned_models_permissions_delete]
- model_info = genai.get_model("tunedModels/my-increment-model")
+ import google.generativeai as genai
+
+ model_info = genai.get_model(self.tuned_model_name)
# [START_EXCLUDE]
for p in model_info.permissions.list():
if p.role.name != "OWNER":
diff --git a/setup.py b/setup.py
index b4b05e619..d8ab792a4 100644
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
+# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
@@ -36,13 +36,10 @@ def get_version():
version = get_version()
-if version[0] == "0":
- release_status = "Development Status :: 4 - Beta"
-else:
- release_status = "Development Status :: 5 - Production/Stable"
+release_status = "Development Status :: 7 - Inactive"
dependencies = [
- "google-ai-generativelanguage==0.6.6",
+ "google-ai-generativelanguage==0.6.15",
"google-api-core",
"google-api-python-client",
"google-auth>=2.15.0", # 2.15 adds API key auth support
@@ -86,8 +83,10 @@ def get_version():
"Programming Language :: Python :: 3.10", # Colab
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Typing :: Typed",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
@@ -97,4 +96,5 @@ def get_version():
extras_require=extras_require,
include_package_data=True,
zip_safe=False,
+ package_data={"generativeai": ["py.typed"]},
)
diff --git a/tests/notebook/lib/test_llm_function.py b/tests/notebook/lib/test_llm_function.py
index 896e49c88..008ed7a38 100644
--- a/tests/notebook/lib/test_llm_function.py
+++ b/tests/notebook/lib/test_llm_function.py
@@ -393,7 +393,7 @@ def _is_length_greater_than(lhs: Mapping[str, Any], rhs: Mapping[str, Any]) -> b
# Batch-based comparison function for post-processing.
def _sum_of_lengths(
- rows: Sequence[tuple[Mapping[str, Any], Mapping[str, Any]]]
+ rows: Sequence[tuple[Mapping[str, Any], Mapping[str, Any]]],
) -> Sequence[int]:
return [lhs["length"] + rhs["length"] for lhs, rhs in rows]
diff --git a/tests/test_client.py b/tests/test_client.py
index 0cc3e05eb..e6e4acfd4 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -29,6 +29,18 @@ def test_api_key_passed_via_client_options(self):
client_opts = client._client_manager.client_config["client_options"]
self.assertEqual(client_opts.api_key, "AIzA_client_opts")
+ @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "AIzA_env"})
+    def test_gemini_api_key_from_environment(self):
+ # Default to API key loaded from environment.
+ client.configure()
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_env")
+
+ # But not when a key is provided explicitly.
+ client.configure(api_key="AIzA_client")
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_client")
+
@mock.patch.dict(os.environ, {"GOOGLE_API_KEY": "AIzA_env"})
def test_api_key_from_environment(self):
# Default to API key loaded from environment.
@@ -41,6 +53,30 @@ def test_api_key_from_environment(self):
client_opts = client._client_manager.client_config["client_options"]
self.assertEqual(client_opts.api_key, "AIzA_client")
+ @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "", "GOOGLE_API_KEY": "AIzA_env"})
+ def test_empty_gemini_api_key_doesnt_shadow(self):
+ # Default to API key loaded from environment.
+ client.configure()
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_env")
+
+ # But not when a key is provided explicitly.
+ client.configure(api_key="AIzA_client")
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_client")
+
+    @mock.patch.dict(os.environ, {"GEMINI_API_KEY": "AIzA_env", "GOOGLE_API_KEY": ""})
+ def test_empty_google_api_key_doesnt_shadow(self):
+ # Default to API key loaded from environment.
+ client.configure()
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_env")
+
+ # But not when a key is provided explicitly.
+ client.configure(api_key="AIzA_client")
+ client_opts = client._client_manager.client_config["client_options"]
+ self.assertEqual(client_opts.api_key, "AIzA_client")
+
def test_api_key_cannot_be_set_twice(self):
client_opts = client_options.ClientOptions(api_key="AIzA_client_opts")
@@ -58,11 +94,17 @@ def test_api_key_and_client_options(self):
self.assertEqual(actual_client_opts.api_endpoint, "web.site")
@parameterized.parameters(
- client.get_default_discuss_client,
- client.get_default_text_client,
- client.get_default_discuss_async_client,
+ client.get_default_cache_client,
+ client.get_default_file_client,
+ client.get_default_file_async_client,
+ client.get_default_generative_client,
+ client.get_default_generative_async_client,
client.get_default_model_client,
client.get_default_operations_client,
+ client.get_default_retriever_client,
+ client.get_default_retriever_async_client,
+ client.get_default_permission_client,
+ client.get_default_permission_async_client,
)
@mock.patch.dict(os.environ, {"GOOGLE_API_KEY": "AIzA_env"})
def test_configureless_client_with_key(self, factory_fn):
@@ -76,7 +118,7 @@ class DummyClient:
def __init__(self, *args, **kwargs):
pass
- def generate_text(self, metadata=None):
+ def generate_content(self, metadata=None):
self.metadata = metadata
not_a_function = 7
@@ -92,26 +134,26 @@ def static():
def classm(cls):
cls.called_classm = True
- @mock.patch.object(glm, "TextServiceClient", DummyClient)
+ @mock.patch.object(glm, "GenerativeServiceClient", DummyClient)
def test_default_metadata(self):
# The metadata wrapper injects this argument.
metadata = [("hello", "world")]
client.configure(default_metadata=metadata)
- text_client = client.get_default_text_client()
- text_client.generate_text()
+ generative_client = client.get_default_generative_client()
+ generative_client.generate_content()
- self.assertEqual(metadata, text_client.metadata)
+ self.assertEqual(metadata, generative_client.metadata)
- self.assertEqual(text_client.not_a_function, ClientTests.DummyClient.not_a_function)
+ self.assertEqual(generative_client.not_a_function, ClientTests.DummyClient.not_a_function)
# Since these don't have a metadata arg, they'll fail if the wrapper is applied.
- text_client._hidden()
- self.assertTrue(text_client.called_hidden)
+ generative_client._hidden()
+ self.assertTrue(generative_client.called_hidden)
- text_client.static()
+ generative_client.static()
- text_client.classm()
+ generative_client.classm()
self.assertTrue(ClientTests.DummyClient.called_classm)
def test_same_config(self):
diff --git a/tests/test_content.py b/tests/test_content.py
index b52858bb8..2031e40ae 100644
--- a/tests/test_content.py
+++ b/tests/test_content.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
+import enum
import pathlib
import typing_extensions
from typing import Any, Union, Iterable
@@ -35,6 +36,10 @@
TEST_JPG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.jpg"
TEST_JPG_DATA = TEST_JPG_PATH.read_bytes()
+TEST_GIF_PATH = HERE / "test_img.gif"
+TEST_GIF_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.gif"
+TEST_GIF_DATA = TEST_GIF_PATH.read_bytes()
+
# simple test function
def datetime():
@@ -65,10 +70,33 @@ class ADataClassWithList:
a: list[int]
+class Choices(enum.Enum):
+ A = "a"
+ B = "b"
+ C = "c"
+ D = "d"
+
+
+@dataclasses.dataclass
+class HasEnum:
+ choice: Choices
+
+
class UnitTests(parameterized.TestCase):
+
@parameterized.named_parameters(
- ["PIL", PIL.Image.open(TEST_PNG_PATH)],
["RGBA", PIL.Image.fromarray(np.zeros([6, 6, 4], dtype=np.uint8))],
+ ["RGB", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8))],
+ ["P", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8)).convert("P")],
+ )
+ def test_numpy_to_blob(self, image):
+ blob = content_types.image_to_blob(image)
+ self.assertIsInstance(blob, protos.Blob)
+ self.assertEqual(blob.mime_type, "image/webp")
+ self.assertStartsWith(blob.data, b"RIFF \x00\x00\x00WEBPVP8L")
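+        # In-memory images with no native file format are re-encoded as
+        # lossless WebP, hence the RIFF/VP8L container header asserted above.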
+
+ @parameterized.named_parameters(
+ ["PIL", PIL.Image.open(TEST_PNG_PATH)],
["IPython", IPython.display.Image(filename=TEST_PNG_PATH)],
)
def test_png_to_blob(self, image):
@@ -79,7 +107,6 @@ def test_png_to_blob(self, image):
@parameterized.named_parameters(
["PIL", PIL.Image.open(TEST_JPG_PATH)],
- ["RGB", PIL.Image.fromarray(np.zeros([6, 6, 3], dtype=np.uint8))],
["IPython", IPython.display.Image(filename=TEST_JPG_PATH)],
)
def test_jpg_to_blob(self, image):
@@ -88,6 +115,16 @@ def test_jpg_to_blob(self, image):
self.assertEqual(blob.mime_type, "image/jpeg")
self.assertStartsWith(blob.data, b"\xff\xd8\xff\xe0\x00\x10JFIF")
+ @parameterized.named_parameters(
+ ["PIL", PIL.Image.open(TEST_GIF_PATH)],
+ ["IPython", IPython.display.Image(filename=TEST_GIF_PATH)],
+ )
+ def test_gif_to_blob(self, image):
+ blob = content_types.image_to_blob(image)
+ self.assertIsInstance(blob, protos.Blob)
+ self.assertEqual(blob.mime_type, "image/gif")
+ self.assertStartsWith(blob.data, b"GIF87a")
+
@parameterized.named_parameters(
["BlobDict", {"mime_type": "image/png", "data": TEST_PNG_DATA}],
["protos.Blob", protos.Blob(mime_type="image/png", data=TEST_PNG_DATA)],
@@ -398,12 +435,78 @@ def no_args():
["empty_dictionary_list", [{"code_execution": {}}]],
)
def test_code_execution(self, tools):
- if isinstance(tools, Iterable):
- t = content_types._make_tools(tools)
- self.assertIsInstance(t[0].code_execution, protos.CodeExecution)
- else:
- t = content_types._make_tool(tools) # Pass code execution into tools
- self.assertIsInstance(t.code_execution, protos.CodeExecution)
+ t = content_types._make_tools(tools)
+ self.assertIsInstance(t[0].code_execution, protos.CodeExecution)
+
+ @parameterized.named_parameters(
+ ["string", "google_search_retrieval"],
+ ["empty_dictionary", {"google_search_retrieval": {}}],
+ [
+ "empty_dictionary_with_dynamic_retrieval_config",
+ {"google_search_retrieval": {"dynamic_retrieval_config": {}}},
+ ],
+ [
+ "dictionary_with_mode_integer",
+ {"google_search_retrieval": {"dynamic_retrieval_config": {"mode": 0}}},
+ ],
+ [
+ "dictionary_with_mode_string",
+ {"google_search_retrieval": {"dynamic_retrieval_config": {"mode": "DYNAMIC"}}},
+ ],
+ [
+ "dictionary_with_dynamic_retrieval_config",
+ {
+ "google_search_retrieval": {
+ "dynamic_retrieval_config": {"mode": "unspecified", "dynamic_threshold": 0.5}
+ }
+ },
+ ],
+ [
+ "proto_object",
+ protos.GoogleSearchRetrieval(
+ dynamic_retrieval_config=protos.DynamicRetrievalConfig(
+ mode="MODE_UNSPECIFIED", dynamic_threshold=0.5
+ )
+ ),
+ ],
+ [
+ "proto_passed_in",
+ protos.Tool(
+ google_search_retrieval=protos.GoogleSearchRetrieval(
+ dynamic_retrieval_config=protos.DynamicRetrievalConfig(
+ mode="MODE_UNSPECIFIED", dynamic_threshold=0.5
+ )
+ )
+ ),
+ ],
+ [
+ "proto_object_list",
+ [
+ protos.GoogleSearchRetrieval(
+ dynamic_retrieval_config=protos.DynamicRetrievalConfig(
+ mode="MODE_UNSPECIFIED", dynamic_threshold=0.5
+ )
+ )
+ ],
+ ],
+ [
+ "proto_passed_in_list",
+ [
+ protos.Tool(
+ google_search_retrieval=protos.GoogleSearchRetrieval(
+ dynamic_retrieval_config=protos.DynamicRetrievalConfig(
+ mode="MODE_UNSPECIFIED", dynamic_threshold=0.5
+ )
+ )
+ )
+ ],
+ ],
+ )
+ def test_search_grounding(self, tools):
+ t = content_types._make_tools(tools)
+ self.assertIsInstance(t[0].google_search_retrieval, protos.GoogleSearchRetrieval)
def test_two_fun_is_one_tool(self):
def a():
@@ -536,6 +639,25 @@ def b():
},
),
],
+ ["enum", Choices, protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"])],
+ [
+ "enum_list",
+ list[Choices],
+ protos.Schema(
+ type="ARRAY",
+ items=protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"]),
+ ),
+ ],
+ [
+ "has_enum",
+ HasEnum,
+ protos.Schema(
+ type=protos.Type.OBJECT,
+ properties={
+ "choice": protos.Schema(type=protos.Type.STRING, enum=["a", "b", "c", "d"])
+ },
+ ),
+ ],
)
def test_auto_schema(self, annotation, expected):
def fun(a: annotation):
diff --git a/tests/test_discuss.py b/tests/test_discuss.py
deleted file mode 100644
index 4e54cf754..000000000
--- a/tests/test_discuss.py
+++ /dev/null
@@ -1,386 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import copy
-
-import unittest.mock
-
-from google.generativeai import protos
-
-from google.generativeai import discuss
-from google.generativeai import client
-import google.generativeai as genai
-from google.generativeai.types import palm_safety_types
-
-from absl.testing import absltest
-from absl.testing import parameterized
-
-# TODO: replace returns with 'assert' statements
-
-
-class UnitTests(parameterized.TestCase):
- def setUp(self):
- self.client = unittest.mock.MagicMock()
-
- client._client_manager.clients["discuss"] = self.client
-
- self.observed_request = None
-
- self.mock_response = protos.GenerateMessageResponse(
- candidates=[
- protos.Message(content="a", author="1"),
- protos.Message(content="b", author="1"),
- protos.Message(content="c", author="1"),
- ],
- )
-
- def fake_generate_message(
- request: protos.GenerateMessageRequest,
- **kwargs,
- ) -> protos.GenerateMessageResponse:
- self.observed_request = request
- response = copy.copy(self.mock_response)
- response.messages = request.prompt.messages
- return response
-
- self.client.generate_message = fake_generate_message
-
- @parameterized.named_parameters(
- ["string", "Hello", ""],
- ["dict", {"content": "Hello"}, ""],
- ["dict_author", {"content": "Hello", "author": "me"}, "me"],
- ["proto", protos.Message(content="Hello"), ""],
- ["proto_author", protos.Message(content="Hello", author="me"), "me"],
- )
- def test_make_message(self, message, author):
- x = discuss._make_message(message)
- self.assertIsInstance(x, protos.Message)
- self.assertEqual("Hello", x.content)
- self.assertEqual(author, x.author)
-
- @parameterized.named_parameters(
- ["string", "Hello", ["Hello"]],
- ["dict", {"content": "Hello"}, ["Hello"]],
- ["proto", protos.Message(content="Hello"), ["Hello"]],
- [
- "list",
- ["hello0", {"content": "hello1"}, protos.Message(content="hello2")],
- ["hello0", "hello1", "hello2"],
- ],
- )
- def test_make_messages(self, messages, expected_contents):
- messages = discuss._make_messages(messages)
- for expected, message in zip(expected_contents, messages):
- self.assertEqual(expected, message.content)
-
- @parameterized.named_parameters(
- ["tuple", ("hello", {"content": "goodbye"})],
- ["iterable", iter(["hello", "goodbye"])],
- ["dict", {"input": "hello", "output": "goodbye"}],
- [
- "proto",
- protos.Example(
- input=protos.Message(content="hello"),
- output=protos.Message(content="goodbye"),
- ),
- ],
- )
- def test_make_example(self, example):
- x = discuss._make_example(example)
- self.assertIsInstance(x, protos.Example)
- self.assertEqual("hello", x.input.content)
- self.assertEqual("goodbye", x.output.content)
- return
-
- @parameterized.named_parameters(
- [
- "messages",
- [
- "Hi",
- {"content": "Hello!"},
- "what's your name?",
- protos.Message(content="Dave, what's yours"),
- ],
- ],
- [
- "examples",
- [
- ("Hi", "Hello!"),
- {
- "input": "what's your name?",
- "output": {"content": "Dave, what's yours"},
- },
- ],
- ],
- )
- def test_make_examples(self, examples):
- examples = discuss._make_examples(examples)
- self.assertLen(examples, 2)
- self.assertEqual(examples[0].input.content, "Hi")
- self.assertEqual(examples[0].output.content, "Hello!")
- self.assertEqual(examples[1].input.content, "what's your name?")
- self.assertEqual(examples[1].output.content, "Dave, what's yours")
-
- return
-
- def test_make_examples_from_example(self):
- ex_dict = {"input": "hello", "output": "meow!"}
- example = discuss._make_example(ex_dict)
- examples1 = discuss._make_examples(ex_dict)
- examples2 = discuss._make_examples(discuss._make_example(ex_dict))
-
- self.assertEqual(example, examples1[0])
- self.assertEqual(example, examples2[0])
-
- @parameterized.named_parameters(
- ["str", "hello"],
- ["message", protos.Message(content="hello")],
- ["messages", ["hello"]],
- ["dict", {"messages": "hello"}],
- ["dict2", {"messages": ["hello"]}],
- ["proto", protos.MessagePrompt(messages=[protos.Message(content="hello")])],
- )
- def test_make_message_prompt_from_messages(self, prompt):
- x = discuss._make_message_prompt(prompt)
- self.assertIsInstance(x, protos.MessagePrompt)
- self.assertEqual(x.messages[0].content, "hello")
- return
-
- @parameterized.named_parameters(
- [
- "dict",
- [
- {
- "context": "you are a cat",
- "examples": ["are you hungry?", "meow!"],
- "messages": "hello",
- }
- ],
- {},
- ],
- [
- "kwargs",
- [],
- {
- "context": "you are a cat",
- "examples": ["are you hungry?", "meow!"],
- "messages": "hello",
- },
- ],
- [
- "proto",
- [
- protos.MessagePrompt(
- context="you are a cat",
- examples=[
- protos.Example(
- input=protos.Message(content="are you hungry?"),
- output=protos.Message(content="meow!"),
- )
- ],
- messages=[protos.Message(content="hello")],
- )
- ],
- {},
- ],
- )
- def test_make_message_prompt_from_prompt(self, args, kwargs):
- x = discuss._make_message_prompt(*args, **kwargs)
- self.assertIsInstance(x, protos.MessagePrompt)
- self.assertEqual(x.context, "you are a cat")
- self.assertEqual(x.examples[0].input.content, "are you hungry?")
- self.assertEqual(x.examples[0].output.content, "meow!")
- self.assertEqual(x.messages[0].content, "hello")
-
- def test_make_generate_message_request_nested(
- self,
- ):
- request0 = discuss._make_generate_message_request(
- **{
- "model": "models/Dave",
- "context": "you are a cat",
- "examples": ["hello", "meow", "are you hungry?", "meow!"],
- "messages": "Please catch that mouse.",
- "temperature": 0.2,
- "candidate_count": 7,
- }
- )
- request1 = discuss._make_generate_message_request(
- **{
- "model": "models/Dave",
- "prompt": {
- "context": "you are a cat",
- "examples": ["hello", "meow", "are you hungry?", "meow!"],
- "messages": "Please catch that mouse.",
- },
- "temperature": 0.2,
- "candidate_count": 7,
- }
- )
-
- self.assertIsInstance(request0, protos.GenerateMessageRequest)
- self.assertIsInstance(request1, protos.GenerateMessageRequest)
- self.assertEqual(request0, request1)
-
- @parameterized.parameters(
- {"prompt": {}, "context": "You are a cat."},
- {
- "prompt": {"context": "You are a cat."},
- "examples": ["hello", "meow"],
- },
- {"prompt": {"examples": ["hello", "meow"]}, "messages": "hello"},
- )
- def test_make_generate_message_request_flat_prompt_conflict(
- self,
- context=None,
- examples=None,
- messages=None,
- prompt=None,
- ):
- with self.assertRaises(ValueError):
- x = discuss._make_generate_message_request(
- model="test",
- context=context,
- examples=examples,
- messages=messages,
- prompt=prompt,
- )
-
- @parameterized.parameters(
- {"kwargs": {"context": "You are a cat."}},
- {"kwargs": {"messages": "hello"}},
- {"kwargs": {"examples": [["a", "b"], ["c", "d"]]}},
- {
- "kwargs": {
- "messages": ["hello"],
- "examples": [["a", "b"], ["c", "d"]],
- }
- },
- )
- def test_reply(self, kwargs):
- response = genai.chat(**kwargs)
- first_messages = response.messages
-
- self.assertEqual("a", response.last)
- self.assertEqual(
- [
- {"author": "1", "content": "a"},
- {"author": "1", "content": "b"},
- {"author": "1", "content": "c"},
- ],
- response.candidates,
- )
-
- response = response.reply("again")
-
- def test_receive_and_reply_with_filters(self):
- self.mock_response = mock_response = protos.GenerateMessageResponse(
- candidates=[protos.Message(content="a", author="1")],
- filters=[
- protos.ContentFilter(
- reason=palm_safety_types.BlockedReason.SAFETY, message="unsafe"
- ),
- protos.ContentFilter(reason=palm_safety_types.BlockedReason.OTHER),
- ],
- )
- response = discuss.chat(messages="do filters work?")
-
- filters = response.filters
- self.assertLen(filters, 2)
- self.assertIsInstance(filters[0]["reason"], palm_safety_types.BlockedReason)
- self.assertEqual(filters[0]["reason"], palm_safety_types.BlockedReason.SAFETY)
- self.assertEqual(filters[0]["message"], "unsafe")
-
- self.mock_response = protos.GenerateMessageResponse(
- candidates=[protos.Message(content="a", author="1")],
- filters=[
- protos.ContentFilter(
- reason=palm_safety_types.BlockedReason.BLOCKED_REASON_UNSPECIFIED
- )
- ],
- )
-
- response = response.reply("Does reply work?")
- filters = response.filters
- self.assertLen(filters, 1)
- self.assertIsInstance(filters[0]["reason"], palm_safety_types.BlockedReason)
- self.assertEqual(
- filters[0]["reason"],
- palm_safety_types.BlockedReason.BLOCKED_REASON_UNSPECIFIED,
- )
-
- def test_chat_citations(self):
- self.mock_response = mock_response = protos.GenerateMessageResponse(
- candidates=[
- {
- "content": "Hello google!",
- "author": "1",
- "citation_metadata": {
- "citation_sources": [
- {
- "start_index": 6,
- "end_index": 12,
- "uri": "https://google.com",
- }
- ]
- },
- }
- ],
- )
-
- response = discuss.chat(messages="Do citations work?")
-
- self.assertEqual(
- response.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"],
- 6,
- )
-
- response = response.reply("What about a second time?")
-
- self.assertEqual(
- response.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"],
- 6,
- )
- self.assertLen(response.messages, 4)
-
- def test_set_last(self):
- response = discuss.chat(messages="Can you overwrite `.last`?")
- response.last = "yes"
- response = response.reply("glad to hear it!")
- response.last = "Me too!"
- self.assertEqual(
- [msg["content"] for msg in response.messages],
- [
- "Can you overwrite `.last`?",
- "yes",
- "glad to hear it!",
- "Me too!",
- ],
- )
-
- def test_generate_message_called_with_request_options(self):
- self.client.generate_message = unittest.mock.MagicMock()
- request = unittest.mock.ANY
- request_options = {"timeout": 120}
-
- try:
- genai.chat(**{"context": "You are a cat."}, request_options=request_options)
- except AttributeError:
- pass
-
- self.client.generate_message.assert_called_once_with(request, **request_options)
-
-
-if __name__ == "__main__":
- absltest.main()
diff --git a/tests/test_discuss_async.py b/tests/test_discuss_async.py
deleted file mode 100644
index d35d03525..000000000
--- a/tests/test_discuss_async.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-from typing import Any
-import unittest
-
-from google.generativeai import protos
-
-from google.generativeai import discuss
-from absl.testing import absltest
-from absl.testing import parameterized
-
-
-class AsyncTests(parameterized.TestCase, unittest.IsolatedAsyncioTestCase):
- async def test_chat_async(self):
- client = unittest.mock.AsyncMock()
-
- observed_request = None
-
- async def fake_generate_message(
- request: protos.GenerateMessageRequest,
- **kwargs,
- ) -> protos.GenerateMessageResponse:
- nonlocal observed_request
- observed_request = request
- return protos.GenerateMessageResponse(
- candidates=[
- protos.Message(
- author="1",
- content="Why did the chicken cross the road?",
- )
- ]
- )
-
- client.generate_message = fake_generate_message
-
- observed_response = await discuss.chat_async(
- model="models/bard",
- context="Example Prompt",
- examples=[["Example from human", "Example response from AI"]],
- messages=["Tell me a joke"],
- temperature=0.75,
- candidate_count=1,
- client=client,
- )
-
- self.assertEqual(
- observed_request,
- protos.GenerateMessageRequest(
- model="models/bard",
- prompt=protos.MessagePrompt(
- context="Example Prompt",
- examples=[
- protos.Example(
- input=protos.Message(content="Example from human"),
- output=protos.Message(content="Example response from AI"),
- )
- ],
- messages=[protos.Message(author="0", content="Tell me a joke")],
- ),
- temperature=0.75,
- candidate_count=1,
- ),
- )
- self.assertEqual(
- observed_response.candidates,
- [{"author": "1", "content": "Why did the chicken cross the road?"}],
- )
-
-
-if __name__ == "__main__":
- absltest.main()
diff --git a/tests/test_files.py b/tests/test_files.py
index 063f1ce3a..0f7ca5707 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -12,13 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import annotations
from google.generativeai.types import file_types
import collections
import datetime
+import io
import os
-from typing import Iterable, Union
+from typing import Iterable, Sequence
import pathlib
import google
@@ -37,12 +39,13 @@ def __init__(self, test):
def create_file(
self,
- path: Union[str, pathlib.Path, os.PathLike],
+ path: str | io.IOBase | os.PathLike,
*,
- mime_type: Union[str, None] = None,
- name: Union[str, None] = None,
- display_name: Union[str, None] = None,
+ mime_type: str | None = None,
+ name: str | None = None,
+ display_name: str | None = None,
resumable: bool = True,
+ metadata: Sequence[tuple[str, str]] = (),
) -> protos.File:
self.observed_requests.append(
dict(
@@ -100,12 +103,13 @@ def test_video_metadata(self):
protos.File(
uri="https://test",
state="ACTIVE",
+ mime_type="video/quicktime",
video_metadata=dict(video_duration=datetime.timedelta(seconds=30)),
error=dict(code=7, message="ok?"),
)
)
- f = genai.upload_file(path="dummy")
+ f = genai.upload_file(path="dummy.mov")
self.assertEqual(google.rpc.status_pb2.Status(code=7, message="ok?"), f.error)
self.assertEqual(
protos.VideoMetadata(dict(video_duration=datetime.timedelta(seconds=30))),
diff --git a/tests/test_generation.py b/tests/test_generation.py
index 0cc3bfd07..67b1544b6 100644
--- a/tests/test_generation.py
+++ b/tests/test_generation.py
@@ -1,4 +1,20 @@
+# -*- coding: utf-8 -*-
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
import inspect
+import json
import string
import textwrap
from typing_extensions import TypedDict
@@ -22,6 +38,8 @@ class Person(TypedDict):
class UnitTests(parameterized.TestCase):
+ maxDiff = None
+
@parameterized.named_parameters(
[
"protos.GenerationConfig",
@@ -416,12 +434,8 @@ def test_join_prompt_feedbacks(self):
],
"role": "assistant",
},
- "citation_metadata": {"citation_sources": []},
"index": 0,
- "finish_reason": 0,
- "safety_ratings": [],
- "token_count": 0,
- "grounding_attributions": [],
+ "citation_metadata": {},
},
{
"content": {
@@ -429,11 +443,7 @@ def test_join_prompt_feedbacks(self):
"role": "assistant",
},
"index": 1,
- "citation_metadata": {"citation_sources": []},
- "finish_reason": 0,
- "safety_ratings": [],
- "token_count": 0,
- "grounding_attributions": [],
+ "citation_metadata": {},
},
{
"content": {
@@ -458,17 +468,16 @@ def test_join_prompt_feedbacks(self):
},
]
},
- "finish_reason": 0,
- "safety_ratings": [],
- "token_count": 0,
- "grounding_attributions": [],
},
]
def test_join_candidates(self):
candidate_lists = [[protos.Candidate(c) for c in cl] for cl in self.CANDIDATE_LISTS]
result = generation_types._join_candidate_lists(candidate_lists)
- self.assertEqual(self.MERGED_CANDIDATES, [type(r).to_dict(r) for r in result])
+ self.assertEqual(
+ self.MERGED_CANDIDATES,
+ [type(r).to_dict(r, including_default_value_fields=False) for r in result],
+ )
def test_join_chunks(self):
chunks = [protos.GenerateContentResponse(candidates=cl) for cl in self.CANDIDATE_LISTS]
@@ -480,6 +489,12 @@ def test_join_chunks(self):
],
)
+ chunks[-1].usage_metadata = protos.GenerateContentResponse.UsageMetadata(
+ prompt_token_count=5
+ )
+
+ chunks[-1].model_version = "gemini-1.5-flash-002"
+
result = generation_types._join_chunks(chunks)
expected = protos.GenerateContentResponse(
@@ -495,10 +510,19 @@ def test_join_chunks(self):
}
],
},
+ "usage_metadata": {"prompt_token_count": 5},
+ "model_version": "gemini-1.5-flash-002",
},
)
- self.assertEqual(type(expected).to_dict(expected), type(result).to_dict(expected))
+ expected = json.dumps(
+ type(expected).to_dict(expected, including_default_value_fields=False), indent=4
+ )
+ result = json.dumps(
+ type(result).to_dict(result, including_default_value_fields=False), indent=4
+ )
+
+ self.assertEqual(expected, result)
def test_generate_content_response_iterator_end_to_end(self):
chunks = [protos.GenerateContentResponse(candidates=cl) for cl in self.CANDIDATE_LISTS]
diff --git a/tests/test_generative_models.py b/tests/test_generative_models.py
index cccea9d48..74469e5b8 100644
--- a/tests/test_generative_models.py
+++ b/tests/test_generative_models.py
@@ -115,7 +115,7 @@ def setUp(self):
def test_hello(self):
# Generate text from text prompt
- model = generative_models.GenerativeModel(model_name="gemini-pro")
+ model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
self.responses["generate_content"].append(simple_response("world!"))
@@ -138,7 +138,7 @@ def test_hello(self):
)
def test_image(self, content):
# Generate text from image
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
cat = "It's a cat"
self.responses["generate_content"].append(simple_response(cat))
@@ -172,7 +172,7 @@ def test_image(self, content):
)
def test_generation_config_overwrite(self, config1, config2):
# Generation config
- model = generative_models.GenerativeModel("gemini-pro", generation_config=config1)
+ model = generative_models.GenerativeModel("gemini-1.5-flash", generation_config=config1)
self.responses["generate_content"] = [
simple_response(" world!"),
@@ -218,7 +218,7 @@ def test_generation_config_overwrite(self, config1, config2):
)
def test_safety_overwrite(self, safe1, safe2):
# Safety
- model = generative_models.GenerativeModel("gemini-pro", safety_settings=safe1)
+ model = generative_models.GenerativeModel("gemini-1.5-flash", safety_settings=safe1)
self.responses["generate_content"] = [
simple_response(" world!"),
@@ -253,7 +253,7 @@ def test_stream_basic(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)
self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -267,7 +267,7 @@ def test_stream_lookahead(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)
self.assertEqual(self.observed_requests[0].contents[0].parts[0].text, "Hello")
@@ -287,7 +287,7 @@ def test_stream_prompt_feedback_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Bad stuff!", stream=True)
self.assertEqual(
@@ -322,7 +322,7 @@ def test_stream_prompt_feedback_not_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)
self.assertEqual(
@@ -389,7 +389,7 @@ def add(a: int, b: int) -> int:
def test_chat(self):
# Multi turn chat
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
self.responses["generate_content"] = [
@@ -423,7 +423,7 @@ def test_chat(self):
def test_chat_roles(self):
self.responses["generate_content"] = [simple_response("hello!")]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
response = chat.send_message("hello?")
history = chat.history
@@ -792,7 +792,7 @@ def test_tool_config(self, tool_config, expected_tool_config):
)
self.responses["generate_content"] = [simple_response("echo echo")]
- model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+ model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
_ = model.generate_content("Hello", tools=[tools], tool_config=tool_config)
req = self.observed_requests[0]
@@ -811,7 +811,9 @@ def test_tool_config(self, tool_config, expected_tool_config):
)
def test_system_instruction(self, instruction, expected_instr):
self.responses["generate_content"] = [simple_response("echo echo")]
- model = generative_models.GenerativeModel("gemini-pro", system_instruction=instruction)
+ model = generative_models.GenerativeModel(
+ "gemini-1.5-flash", system_instruction=instruction
+ )
_ = model.generate_content("test")
@@ -852,7 +854,7 @@ def test_count_tokens_smoke(self, kwargs):
)
def test_repr_for_unary_non_streamed_response(self):
- model = generative_models.GenerativeModel(model_name="gemini-pro")
+ model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
self.responses["generate_content"].append(simple_response("world!"))
response = model.generate_content("Hello")
@@ -885,7 +887,7 @@ def test_repr_for_streaming_start_to_finish(self):
chunks = ["first", " second", " third"]
self.responses["stream_generate_content"] = [(simple_response(text) for text in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Hello", stream=True)
iterator = iter(response)
@@ -935,8 +937,7 @@ def test_repr_for_streaming_start_to_finish(self):
"citation_metadata": {}
}
],
- "prompt_feedback": {},
- "usage_metadata": {}
+ "prompt_feedback": {}
}),
)"""
)
@@ -964,8 +965,7 @@ def test_repr_for_streaming_start_to_finish(self):
"citation_metadata": {}
}
],
- "prompt_feedback": {},
- "usage_metadata": {}
+ "prompt_feedback": {}
}),
)"""
)
@@ -982,7 +982,7 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
]
self.responses["stream_generate_content"] = [(chunk for chunk in chunks)]
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
response = model.generate_content("Bad stuff!", stream=True)
result = repr(response)
@@ -998,10 +998,10 @@ def test_repr_error_info_for_stream_prompt_feedback_blocked(self):
}
}),
),
- error= prompt_feedback {
+ error=BlockedPromptException(prompt_feedback {
block_reason: SAFETY
}
- """
+ )"""
)
self.assertEqual(expected, result)
@@ -1056,11 +1056,10 @@ def no_throw():
"citation_metadata": {}
}
],
- "prompt_feedback": {},
- "usage_metadata": {}
+ "prompt_feedback": {}
}),
),
- error= """
+ error=ValueError()"""
)
self.assertEqual(expected, result)
@@ -1095,47 +1094,11 @@ def test_repr_error_info_for_chat_streaming_unexpected_stop(self):
response = chat.send_message("hello2", stream=True)
result = repr(response)
- expected = textwrap.dedent(
- """\
- response:
- GenerateContentResponse(
- done=True,
- iterator=None,
- result=protos.GenerateContentResponse({
- "candidates": [
- {
- "content": {
- "parts": [
- {
- "text": "abc"
- }
- ]
- },
- "finish_reason": "SAFETY",
- "index": 0,
- "citation_metadata": {}
- }
- ],
- "prompt_feedback": {},
- "usage_metadata": {}
- }),
- ),
- error= index: 0
- content {
- parts {
- text: "abc"
- }
- }
- finish_reason: SAFETY
- citation_metadata {
- }
- """
- )
- self.assertEqual(expected, result)
+ self.assertIn("StopCandidateException", result)
def test_repr_for_multi_turn_chat(self):
# Multi turn chat
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
self.responses["generate_content"] = [
@@ -1158,7 +1121,7 @@ def test_repr_for_multi_turn_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
- model_name='models/gemini-pro',
+ model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1172,7 +1135,7 @@ def test_repr_for_multi_turn_chat(self):
def test_repr_for_incomplete_streaming_chat(self):
# Multi turn chat
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
self.responses["stream_generate_content"] = [
@@ -1187,7 +1150,7 @@ def test_repr_for_incomplete_streaming_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
- model_name='models/gemini-pro',
+ model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1201,7 +1164,7 @@ def test_repr_for_incomplete_streaming_chat(self):
def test_repr_for_broken_streaming_chat(self):
# Multi turn chat
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
self.responses["stream_generate_content"] = [
@@ -1232,7 +1195,7 @@ def test_repr_for_broken_streaming_chat(self):
"""\
ChatSession(
model=genai.GenerativeModel(
- model_name='models/gemini-pro',
+ model_name='models/gemini-1.5-flash',
generation_config={},
safety_settings={},
tools=None,
@@ -1245,7 +1208,9 @@ def test_repr_for_broken_streaming_chat(self):
self.assertEqual(expected, result)
def test_repr_for_system_instruction(self):
- model = generative_models.GenerativeModel("gemini-pro", system_instruction="Be excellent.")
+ model = generative_models.GenerativeModel(
+ "gemini-1.5-flash", system_instruction="Be excellent."
+ )
result = repr(model)
self.assertIn("system_instruction='Be excellent.'", result)
@@ -1276,7 +1241,7 @@ def test_chat_with_request_options(self):
)
request_options = {"timeout": 120}
- model = generative_models.GenerativeModel("gemini-pro")
+ model = generative_models.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat()
chat.send_message("hello", request_options=helper_types.RequestOptions(**request_options))
diff --git a/tests/test_generative_models_async.py b/tests/test_generative_models_async.py
index dd9bc3b62..b37c65235 100644
--- a/tests/test_generative_models_async.py
+++ b/tests/test_generative_models_async.py
@@ -80,7 +80,7 @@ async def count_tokens(
async def test_basic(self):
# Generate text from text prompt
- model = generative_models.GenerativeModel(model_name="gemini-pro")
+ model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
self.responses["generate_content"] = [simple_response("world!")]
@@ -93,7 +93,7 @@ async def test_basic(self):
async def test_streaming(self):
# Generate text from text prompt
- model = generative_models.GenerativeModel(model_name="gemini-pro")
+ model = generative_models.GenerativeModel(model_name="gemini-1.5-flash")
async def responses():
for c in "world!":
@@ -195,7 +195,7 @@ async def test_tool_config(self, tool_config, expected_tool_config):
)
self.responses["generate_content"] = [simple_response("echo echo")]
- model = generative_models.GenerativeModel("gemini-pro", tools=tools)
+ model = generative_models.GenerativeModel("gemini-1.5-flash", tools=tools)
_ = await model.generate_content_async("Hello", tools=[tools], tool_config=tool_config)
req = self.observed_requests[0]
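The async suite receives the same model rename. A minimal sketch of the async entry point it covers, under the same configuration assumptions as above:

import asyncio

import google.generativeai as genai

async def main() -> None:
    model = genai.GenerativeModel(model_name="gemini-1.5-flash")
    response = await model.generate_content_async("Hello")
    print(response.text)

asyncio.run(main())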
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index f060caf88..5d9ec1c42 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -12,11 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import pathlib
import copy
import collections
from typing import Union
+from absl.testing import absltest
from absl.testing import parameterized
from google.generativeai import protos
@@ -81,3 +81,7 @@ def test_get_model(self, request_options, expected_timeout, expected_retry):
self.assertEqual(self.observed_timeout[0], expected_timeout)
self.assertEqual(str(self.observed_retry[0]), str(expected_retry))
+
+
+if __name__ == "__main__":
+ absltest.main()
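test_helpers.py (and test_protos.py further down) gains the standard absl entry point so the module can also be run directly as a script. The pattern, sketched with a hypothetical test case:

from absl.testing import absltest

class SmokeTest(absltest.TestCase):  # hypothetical example test
    def test_truth(self):
        self.assertTrue(True)

if __name__ == "__main__":
    absltest.main()  # runs every test case defined in this module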
diff --git a/tests/test_img.gif b/tests/test_img.gif
new file mode 100644
index 000000000..66c81ac7a
Binary files /dev/null and b/tests/test_img.gif differ
diff --git a/tests/test_models.py b/tests/test_models.py
index c7cd1dbcd..6f10f9123 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -169,14 +169,6 @@ def test_max_temperature(self):
model = models.get_base_model(name)
self.assertEqual(max_temperature, model.max_temperature)
- @parameterized.named_parameters(
- ["simple", "mystery-bison-001"],
- ["model-instance", protos.Model(name="how?-bison-001")],
- )
- def test_fail_with_unscoped_model_name(self, name):
- with self.assertRaises(ValueError):
- model = models.get_model(name)
-
def test_list_models(self):
# The low-level lib wraps the response in an iterable, so this is a fair test.
self.responses = {
diff --git a/tests/test_protos.py b/tests/test_protos.py
index 1b59b0c6e..8a76bd0e3 100644
--- a/tests/test_protos.py
+++ b/tests/test_protos.py
@@ -15,6 +15,7 @@
import pathlib
import re
+from absl.testing import absltest
from absl.testing import parameterized
ROOT = pathlib.Path(__file__).parent.parent
@@ -32,3 +33,7 @@ def test_check_glm_imports(self):
match,
msg=f"Bad `glm.` usage, use `genai.protos` instead,\n in {fpath}",
)
+
+
+if __name__ == "__main__":
+ absltest.main()
diff --git a/tests/test_responder.py b/tests/test_responder.py
deleted file mode 100644
index c075fc65a..000000000
--- a/tests/test_responder.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import pathlib
-from typing import Any
-
-from absl.testing import absltest
-from absl.testing import parameterized
-from google.generativeai import protos
-from google.generativeai import responder
-import IPython.display
-import PIL.Image
-
-HERE = pathlib.Path(__file__).parent
-TEST_PNG_PATH = HERE / "test_img.png"
-TEST_PNG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.png"
-TEST_PNG_DATA = TEST_PNG_PATH.read_bytes()
-
-TEST_JPG_PATH = HERE / "test_img.jpg"
-TEST_JPG_URL = "https://storage.googleapis.com/generativeai-downloads/data/test_img.jpg"
-TEST_JPG_DATA = TEST_JPG_PATH.read_bytes()
-
-
-# simple test function
-def datetime():
- "Returns the current UTC date and time."
-
-
-class UnitTests(parameterized.TestCase):
- @parameterized.named_parameters(
- [
- "FunctionLibrary",
- responder.FunctionLibrary(
- tools=protos.Tool(
- function_declarations=[
- protos.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- )
- ]
- )
- ),
- ],
- [
- "IterableTool-Tool",
- [
- responder.Tool(
- function_declarations=[
- protos.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- )
- ]
- )
- ],
- ],
- [
- "IterableTool-protos.Tool",
- [
- protos.Tool(
- function_declarations=[
- protos.FunctionDeclaration(
- name="datetime",
- description="Returns the current UTC date and time.",
- )
- ]
- )
- ],
- ],
- [
- "IterableTool-ToolDict",
- [
- dict(
- function_declarations=[
- dict(
- name="datetime",
- description="Returns the current UTC date and time.",
- )
- ]
- )
- ],
- ],
- [
- "IterableTool-IterableFD",
- [
- [
- protos.FunctionDeclaration(
- name="datetime",
- description="Returns the current UTC date and time.",
- )
- ]
- ],
- ],
- [
- "IterableTool-FD",
- [
- protos.FunctionDeclaration(
- name="datetime",
- description="Returns the current UTC date and time.",
- )
- ],
- ],
- [
- "Tool",
- responder.Tool(
- function_declarations=[
- protos.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- )
- ]
- ),
- ],
- [
- "protos.Tool",
- protos.Tool(
- function_declarations=[
- protos.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- )
- ]
- ),
- ],
- [
- "ToolDict",
- dict(
- function_declarations=[
- dict(name="datetime", description="Returns the current UTC date and time.")
- ]
- ),
- ],
- [
- "IterableFD-FD",
- [
- responder.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- )
- ],
- ],
- [
- "IterableFD-CFD",
- [
- responder.CallableFunctionDeclaration(
- name="datetime",
- description="Returns the current UTC date and time.",
- function=datetime,
- )
- ],
- ],
- [
- "IterableFD-dict",
- [dict(name="datetime", description="Returns the current UTC date and time.")],
- ],
- ["IterableFD-Callable", [datetime]],
- [
- "FD",
- responder.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- ),
- ],
- [
- "CFD",
- responder.CallableFunctionDeclaration(
- name="datetime",
- description="Returns the current UTC date and time.",
- function=datetime,
- ),
- ],
- [
- "protos.FD",
- protos.FunctionDeclaration(
- name="datetime", description="Returns the current UTC date and time."
- ),
- ],
- ["dict", dict(name="datetime", description="Returns the current UTC date and time.")],
- ["Callable", datetime],
- )
- def test_to_tools(self, tools):
- function_library = responder.to_function_library(tools)
- if function_library is None:
- raise ValueError("This shouldn't happen")
- tools = function_library.to_proto()
-
- tools = type(tools[0]).to_dict(tools[0])
- tools["function_declarations"][0].pop("parameters", None)
-
- expected = dict(
- function_declarations=[
- dict(name="datetime", description="Returns the current UTC date and time.")
- ]
- )
-
- self.assertEqual(tools, expected)
-
- def test_two_fun_is_one_tool(self):
- def a():
- pass
-
- def b():
- pass
-
- function_library = responder.to_function_library([a, b])
- if function_library is None:
- raise ValueError("This shouldn't happen")
- tools = function_library.to_proto()
-
- self.assertLen(tools, 1)
- self.assertLen(tools[0].function_declarations, 2)
-
- @parameterized.named_parameters(
- ["int", int, protos.Schema(type=protos.Type.INTEGER)],
- ["float", float, protos.Schema(type=protos.Type.NUMBER)],
- ["str", str, protos.Schema(type=protos.Type.STRING)],
- [
- "list",
- list[str],
- protos.Schema(
- type=protos.Type.ARRAY,
- items=protos.Schema(type=protos.Type.STRING),
- ),
- ],
- [
- "list-list-int",
- list[list[int]],
- protos.Schema(
- type=protos.Type.ARRAY,
- items=protos.Schema(
- protos.Schema(
- type=protos.Type.ARRAY,
- items=protos.Schema(type=protos.Type.INTEGER),
- ),
- ),
- ),
- ],
- ["dict", dict, protos.Schema(type=protos.Type.OBJECT)],
- ["dict-str-any", dict[str, Any], protos.Schema(type=protos.Type.OBJECT)],
- )
- def test_auto_schema(self, annotation, expected):
- def fun(a: annotation):
- pass
-
- cfd = responder.FunctionDeclaration.from_function(fun)
- got = cfd.parameters.properties["a"]
- self.assertEqual(got, expected)
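test_responder.py is deleted wholesale. The behavior it exercised (converting plain callables, dicts, and proto messages into function declarations) remains reachable through the public tools argument of GenerativeModel; a minimal sketch, where get_time is a hypothetical tool function:

import google.generativeai as genai

def get_time(city: str) -> str:
    """Returns the current time in a city."""  # hypothetical tool
    return "12:00"

# Plain callables are accepted as tools; the SDK infers the
# FunctionDeclaration (name, description, parameter schema) from the
# signature and docstring.
model = genai.GenerativeModel("gemini-1.5-flash", tools=[get_time])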
diff --git a/tests/test_text.py b/tests/test_text.py
deleted file mode 100644
index 795c3dfcd..000000000
--- a/tests/test_text.py
+++ /dev/null
@@ -1,542 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import copy
-import math
-from typing import Any
-import unittest
-import unittest.mock as mock
-
-from google.generativeai import protos
-
-from google.generativeai import text as text_service
-from google.generativeai import client
-from google.generativeai.types import palm_safety_types
-from google.generativeai.types import model_types
-from absl.testing import absltest
-from absl.testing import parameterized
-
-
-class UnitTests(parameterized.TestCase):
- def setUp(self):
- self.client = unittest.mock.MagicMock()
-
- client._client_manager.clients["text"] = self.client
- client._client_manager.clients["model"] = self.client
-
- self.observed_requests = []
-
- self.responses = {}
-
- def add_client_method(f):
- name = f.__name__
- setattr(self.client, name, f)
- return f
-
- @add_client_method
- def generate_text(
- request: protos.GenerateTextRequest,
- **kwargs,
- ) -> protos.GenerateTextResponse:
- self.observed_requests.append(request)
- return self.responses["generate_text"]
-
- @add_client_method
- def embed_text(
- request: protos.EmbedTextRequest,
- **kwargs,
- ) -> protos.EmbedTextResponse:
- self.observed_requests.append(request)
- return self.responses["embed_text"]
-
- @add_client_method
- def batch_embed_text(
- request: protos.EmbedTextRequest,
- **kwargs,
- ) -> protos.EmbedTextResponse:
- self.observed_requests.append(request)
-
- return protos.BatchEmbedTextResponse(
- embeddings=[protos.Embedding(value=[1, 2, 3])] * len(request.texts)
- )
-
- @add_client_method
- def count_text_tokens(
- request: protos.CountTextTokensRequest,
- **kwargs,
- ) -> protos.CountTextTokensResponse:
- self.observed_requests.append(request)
- return self.responses["count_text_tokens"]
-
- @add_client_method
- def get_tuned_model(name) -> protos.TunedModel:
- request = protos.GetTunedModelRequest(name=name)
- self.observed_requests.append(request)
- response = copy.copy(self.responses["get_tuned_model"])
- return response
-
- @parameterized.named_parameters(
- [
- dict(testcase_name="string", prompt="Hello how are"),
- ]
- )
- def test_make_prompt(self, prompt):
- x = text_service._make_text_prompt(prompt)
- self.assertIsInstance(x, protos.TextPrompt)
- self.assertEqual("Hello how are", x.text)
-
- @parameterized.named_parameters(
- [
- dict(testcase_name="string", prompt="What are you"),
- ]
- )
- def test_make_generate_text_request(self, prompt):
- x = text_service._make_generate_text_request(model="models/chat-bison-001", prompt=prompt)
- self.assertEqual("models/chat-bison-001", x.model)
- self.assertIsInstance(x, protos.GenerateTextRequest)
-
- @parameterized.named_parameters(
- [
- dict(
- testcase_name="basic_model",
- model="models/chat-lamda-001",
- text="What are you?",
- )
- ]
- )
- def test_generate_embeddings(self, model, text):
- self.responses["embed_text"] = protos.EmbedTextResponse(
- embedding=protos.Embedding(value=[1, 2, 3])
- )
-
- emb = text_service.generate_embeddings(model=model, text=text)
-
- self.assertIsInstance(emb, dict)
- self.assertEqual(
- self.observed_requests[-1], protos.EmbedTextRequest(model=model, text=text)
- )
- self.assertIsInstance(emb["embedding"][0], float)
-
- @parameterized.named_parameters(
- [
- dict(
- testcase_name="small-2",
- model="models/chat-lamda-001",
- text=["Who are you?", "Who am I?"],
- ),
- dict(
- testcase_name="even-batch",
- model="models/chat-lamda-001",
- text=["Who are you?"] * 100,
- ),
- dict(
- testcase_name="even-batch-plus-one",
- model="models/chat-lamda-001",
- text=["Who are you?"] * 101,
- ),
- dict(
- testcase_name="odd-batch",
- model="models/chat-lamda-001",
- text=["Who are you?"] * 237,
- ),
- ]
- )
- def test_generate_embeddings_batch(self, model, text):
- emb = text_service.generate_embeddings(model=model, text=text)
-
- self.assertIsInstance(emb, dict)
-
- # Check first and last requests.
- self.assertEqual(self.observed_requests[-1].model, model)
- self.assertEqual(self.observed_requests[-1].texts[-1], text[-1])
- self.assertEqual(self.observed_requests[0].texts[0], text[0])
-
- # Check that the list has the right length.
- self.assertIsInstance(emb["embedding"][0], list)
- self.assertLen(emb["embedding"], len(text))
-
- # Check that the right number of requests were sent.
- self.assertLen(
- self.observed_requests,
- math.ceil(len(text) / text_service.EMBEDDING_MAX_BATCH_SIZE),
- )
-
- @parameterized.named_parameters(
- [
- dict(testcase_name="basic", prompt="Why did the chicken cross the"),
- dict(
- testcase_name="temperature",
- prompt="Why did the chicken cross the",
- temperature=0.75,
- ),
- dict(
- testcase_name="stop_list",
- prompt="Why did the chicken cross the",
- stop_sequences=["a", "b", "c"],
- ),
- dict(
- testcase_name="count",
- prompt="Why did the chicken cross the",
- candidate_count=2,
- ),
- ]
- )
- def test_generate_response(self, *, prompt, **kwargs):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[
- protos.TextCompletion(output=" road?"),
- protos.TextCompletion(output=" bridge?"),
- protos.TextCompletion(output=" river?"),
- ]
- )
-
- complete = text_service.generate_text(prompt=prompt, **kwargs)
-
- self.assertEqual(
- self.observed_requests[-1],
- protos.GenerateTextRequest(
- model="models/text-bison-001", prompt=protos.TextPrompt(text=prompt), **kwargs
- ),
- )
-
- self.assertIsInstance(complete.result, str)
-
- self.assertEqual(
- complete.candidates,
- [
- {"output": " road?", "safety_ratings": []},
- {"output": " bridge?", "safety_ratings": []},
- {"output": " river?", "safety_ratings": []},
- ],
- )
-
- def test_stop_string(self):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[
- protos.TextCompletion(output="Hello world?"),
- protos.TextCompletion(output="Hell!"),
- protos.TextCompletion(output="I'm going to stop"),
- ]
- )
- complete = text_service.generate_text(prompt="Hello", stop_sequences="stop")
-
- self.assertEqual(
- self.observed_requests[-1],
- protos.GenerateTextRequest(
- model="models/text-bison-001",
- prompt=protos.TextPrompt(text="Hello"),
- stop_sequences=["stop"],
- ),
- )
- # Just make sure it made it into the request object.
- self.assertEqual(self.observed_requests[-1].stop_sequences, ["stop"])
-
- @parameterized.named_parameters(
- [
- dict(
- testcase_name="basic",
- safety_settings=[
- {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_NONE,
- },
- {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE,
- "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
- },
- ],
- ),
- dict(
- testcase_name="strings",
- safety_settings=[
- {
- "category": "medical",
- "threshold": "block_none",
- },
- {
- "category": "violent",
- "threshold": "low",
- },
- ],
- ),
- dict(
- testcase_name="flat",
- safety_settings={"medical": "block_none", "sex": "low"},
- ),
- dict(
- testcase_name="mixed",
- safety_settings={
- "medical": palm_safety_types.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
- palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE: 1,
- },
- ),
- ]
- )
- def test_safety_settings(self, safety_settings):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[
- protos.TextCompletion(output="No"),
- ]
- )
- # This test really just checks that the safety_settings get converted to a proto.
- result = text_service.generate_text(
- prompt="Say something wicked.", safety_settings=safety_settings
- )
-
- self.assertEqual(
- self.observed_requests[-1].safety_settings[0].category,
- palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- )
-
- def test_filters(self):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[{"output": "hello"}],
- filters=[
- {
- "reason": palm_safety_types.BlockedReason.SAFETY,
- "message": "not safe",
- }
- ],
- )
-
- response = text_service.generate_text(prompt="do filters work?")
- self.assertIsInstance(response.filters[0]["reason"], palm_safety_types.BlockedReason)
- self.assertEqual(response.filters[0]["reason"], palm_safety_types.BlockedReason.SAFETY)
-
- def test_safety_feedback(self):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[{"output": "hello"}],
- safety_feedback=[
- {
- "rating": {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- "probability": palm_safety_types.HarmProbability.HIGH,
- },
- "setting": {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- "threshold": palm_safety_types.HarmBlockThreshold.BLOCK_NONE,
- },
- }
- ],
- )
-
- response = text_service.generate_text(prompt="does safety feedback work?")
- self.assertIsInstance(
- response.safety_feedback[0]["rating"]["probability"],
- palm_safety_types.HarmProbability,
- )
- self.assertEqual(
- response.safety_feedback[0]["rating"]["probability"],
- palm_safety_types.HarmProbability.HIGH,
- )
-
- self.assertIsInstance(
- response.safety_feedback[0]["setting"]["category"],
- protos.HarmCategory,
- )
- self.assertEqual(
- response.safety_feedback[0]["setting"]["category"],
- palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- )
-
- def test_candidate_safety_feedback(self):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[
- {
- "output": "hello",
- "safety_ratings": [
- {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- "probability": palm_safety_types.HarmProbability.HIGH,
- },
- {
- "category": palm_safety_types.HarmCategory.HARM_CATEGORY_VIOLENCE,
- "probability": palm_safety_types.HarmProbability.LOW,
- },
- ],
- }
- ]
- )
-
- result = text_service.generate_text(prompt="Write a story from the ER.")
- self.assertIsInstance(
- result.candidates[0]["safety_ratings"][0]["category"],
- protos.HarmCategory,
- )
- self.assertEqual(
- result.candidates[0]["safety_ratings"][0]["category"],
- palm_safety_types.HarmCategory.HARM_CATEGORY_MEDICAL,
- )
-
- self.assertIsInstance(
- result.candidates[0]["safety_ratings"][0]["probability"],
- palm_safety_types.HarmProbability,
- )
- self.assertEqual(
- result.candidates[0]["safety_ratings"][0]["probability"],
- palm_safety_types.HarmProbability.HIGH,
- )
-
- def test_candidate_citations(self):
- self.responses["generate_text"] = protos.GenerateTextResponse(
- candidates=[
- {
- "output": "Hello Google!",
- "citation_metadata": {
- "citation_sources": [
- {
- "start_index": 6,
- "end_index": 12,
- "uri": "https://google.com",
- }
- ]
- },
- }
- ]
- )
- result = text_service.generate_text(prompt="Hi my name is Google")
- self.assertEqual(
- result.candidates[0]["citation_metadata"]["citation_sources"][0]["start_index"],
- 6,
- )
-
- @parameterized.named_parameters(
- [
- dict(testcase_name="base-name", model="models/text-bison-001"),
- dict(testcase_name="tuned-name", model="tunedModels/bipedal-pangolin-001"),
- dict(
- testcase_name="model",
- model=model_types.Model(
- name="models/text-bison-001",
- base_model_id="text-bison-001",
- version="001",
- display_name="🦬",
- description="🦬🦬🦬🦬🦬🦬🦬🦬🦬🦬🦬",
- input_token_limit=8000,
- output_token_limit=4000,
- supported_generation_methods=["GenerateText"],
- ),
- ),
- dict(
- testcase_name="tuned_model",
- model=model_types.TunedModel(
- name="tunedModels/bipedal-pangolin-001",
- base_model="models/text-bison-001",
- ),
- ),
- dict(
- testcase_name="protos.model",
- model=protos.Model(
- name="models/text-bison-001",
- ),
- ),
- dict(
- testcase_name="protos.tuned_model",
- model=protos.TunedModel(
- name="tunedModels/bipedal-pangolin-001",
- base_model="models/text-bison-001",
- ),
- ),
- dict(
- testcase_name="protos.tuned_model_nested",
- model=protos.TunedModel(
- name="tunedModels/bipedal-pangolin-002",
- tuned_model_source={
- "tuned_model": "tunedModels/bipedal-pangolin-002",
- "base_model": "models/text-bison-001",
- },
- ),
- ),
- ]
- )
- def test_count_message_tokens(self, model):
- self.responses["get_tuned_model"] = protos.TunedModel(
- name="tunedModels/bipedal-pangolin-001", base_model="models/text-bison-001"
- )
- self.responses["count_text_tokens"] = protos.CountTextTokensResponse(token_count=7)
-
- response = text_service.count_text_tokens(model, "Tell me a story about a magic backpack.")
- self.assertEqual({"token_count": 7}, response)
-
- should_look_up_model = isinstance(model, str) and model.startswith("tunedModels/")
- if should_look_up_model:
- self.assertLen(self.observed_requests, 2)
- self.assertEqual(
- self.observed_requests[0],
- protos.GetTunedModelRequest(name="tunedModels/bipedal-pangolin-001"),
- )
-
- def test_count_text_tokens_called_with_request_options(self):
- self.client.count_text_tokens = unittest.mock.MagicMock()
- request = unittest.mock.ANY
- request_options = {"timeout": 120}
-
- try:
- result = text_service.count_text_tokens(
- model="models/",
- prompt="",
- request_options=request_options,
- )
- except AttributeError:
- pass
-
- self.client.count_text_tokens.assert_called_once_with(request, **request_options)
-
- def test_batch_embed_text_called_with_request_options(self):
- self.client.batch_embed_text = unittest.mock.MagicMock()
- request = unittest.mock.ANY
- request_options = {"timeout": 120}
-
- try:
- result = text_service.generate_embeddings(
- model="models/",
- text=["first", "second"],
- request_options=request_options,
- )
- except AttributeError:
- pass
-
- self.client.batch_embed_text.assert_called_once_with(request, **request_options)
-
- def test_embed_text_called_with_request_options(self):
- self.client.embed_text = unittest.mock.MagicMock()
- request = unittest.mock.ANY
- request_options = {"timeout": 120}
-
- try:
- result = text_service.generate_embeddings(
- model="models/",
- text="",
- request_options=request_options,
- )
- except AttributeError:
- pass
-
- self.client.embed_text.assert_called_once_with(request, **request_options)
-
- def test_generate_text_called_with_request_options(self):
- self.client.generate_text = unittest.mock.MagicMock()
- request = unittest.mock.ANY
- request_options = {"timeout": 120}
-
- try:
- result = text_service.generate_text(prompt="", request_options=request_options)
- except AttributeError:
- pass
-
- self.client.generate_text.assert_called_once_with(request, **request_options)
-
-
-if __name__ == "__main__":
- absltest.main()
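test_text.py covered the legacy PaLM-era text service (generate_text, generate_embeddings, count_text_tokens) and is deleted wholesale. Where embeddings are still needed, genai.embed_content is the likely modern counterpart; a minimal sketch (the embedding model name is an assumption, one of the currently published options):

import google.generativeai as genai

result = genai.embed_content(
    model="models/text-embedding-004",  # assumed model name
    content="Hello world",
)
print(len(result["embedding"]))  # length of the returned embedding vector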
diff --git a/third_party/LICENSE.txt b/third_party/LICENSE.txt
index bd47e3837..6d50e78f4 100644
--- a/third_party/LICENSE.txt
+++ b/third_party/LICENSE.txt
@@ -8,3 +8,6 @@
* This is the first paragraph from Shakespeare's "spring", public domain.
* Cajun_instruments.jpg
* This image is from Wikimedia Commons, a public domain (https://commons.wikimedia.org/wiki/Category:Musical_instruments#/media/File:Cajun_instruments.jpg).
+* test.pdf
+ * This is the first two pages of https://arxiv.org/abs/2403.05530 by the Google Gemini Team.
+ * License: CC-BY 4.0
\ No newline at end of file
diff --git a/third_party/test.pdf b/third_party/test.pdf
new file mode 100644
index 000000000..25de0918f
Binary files /dev/null and b/third_party/test.pdf differ