# Gemini API quickstart examples (deduplicated from a scraped docs page).
# Install the SDK first:
#   pip install -U google-genai

from google import genai
from google.genai import types

client = genai.Client(api_key="YOUR_API_KEY")

# --- Example 1: text translation with a system instruction ---
text = "Hey, are you down to grab some pizza later? I'm starving!"
response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    config={
        "system_instruction": "Only output the translated text"
    },
    contents=f"Translate the following text to German: {text}",
)
print(response.text)
# Hey, hast du Lust, später eine Pizza essen zu gehen? Ich habe riesigen Hunger!

# --- Example 2: audio transcription via the GenAI File API ---
# URL = "https://storage.googleapis.com/generativeai-downloads/data/State_of_the_Union_Address_30_January_1961.mp3"
# Upload the audio file to the GenAI File API
uploaded_file = client.files.upload(file="sample.mp3")

prompt = "Generate a transcript of the audio."
# prompt = "Generate a transcript of the audio. Remove filler words such as 'um', 'uh', 'like'."
response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    contents=[prompt, uploaded_file],
)
print(response.text)

# --- Example 3: structured JSON output constrained by a Pydantic schema ---
from pydantic import BaseModel, Field

prompt = "Analyze the user review and determine the aspect, sentiment score, summary quote, and return risk"
input_text = "The boots look amazing and the leather is high quality, but they run way too small. I'm sending them back."


class ReviewAnalysis(BaseModel):
    # Field descriptions are passed to the model as part of the JSON schema.
    aspect: str = Field(description="The feature mentioned (e.g., Price, Comfort, Style, Shipping)")
    summary_quote: str = Field(description="The specific phrase from the review about this aspect")
    sentiment_score: int = Field(description="1 to 5 (1=worst, 5=best)")
    is_return_risk: bool = Field(description="True if the user mentions returning the item")


response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    contents=[prompt, input_text],
    config={
        "response_mime_type": "application/json",
        "response_json_schema": ReviewAnalysis.model_json_schema(),
    },
)
print(response.text)
# {
#   "aspect": "Size",
#   "summary_quote": "they run way too small",
#   "sentiment_score": 2,
#   "is_return_risk": true
# }

# --- Example 4: PDF summarization from raw bytes ---
import httpx

# Download PDF document
doc_url = "https://storage.googleapis.com/generativeai-downloads/data/med_gemini.pdf"
doc_data = httpx.get(doc_url).content

prompt = "Summarize this document"
response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    contents=[
        types.Part.from_bytes(
            data=doc_data,
            mime_type='application/pdf',
        ),
        prompt,
    ],
)
print(response.text)
# --- Example 5: LLM-based task router with structured classification output ---
FLASH_MODEL = 'flash'
PRO_MODEL = 'pro'

CLASSIFIER_SYSTEM_PROMPT = f"""
You are a specialized Task Routing AI. Your sole function is to analyze the user's request and classify its complexity. Choose between `{FLASH_MODEL}` (SIMPLE) or `{PRO_MODEL}` (COMPLEX).

1. `{FLASH_MODEL}`: A fast, efficient model for simple, well-defined tasks.
2. `{PRO_MODEL}`: A powerful, advanced model for complex, open-ended, or multi-step tasks.

A task is COMPLEX if it meets ONE OR MORE of the following criteria:
1. High Operational Complexity (Est. 4+ Steps/Tool Calls)
2. Strategic Planning and Conceptual Design
3. High Ambiguity or Large Scope
4. Deep Debugging and Root Cause Analysis

A task is SIMPLE if it is highly specific, bounded, and has Low Operational Complexity (Est. 1-3 tool calls).
"""

user_input = "I'm getting an error 'Cannot read property 'map' of undefined' when I click the save button. Can you fix it?"

# Plain JSON schema (no Pydantic): the enum constrains the router's choice.
response_schema = {
    "type": "object",
    "properties": {
        "reasoning": {
            "type": "string",
            "description": "A brief, step-by-step explanation for the model choice, referencing the rubric."
        },
        "model_choice": {
            "type": "string",
            "enum": [FLASH_MODEL, PRO_MODEL]
        }
    },
    "required": ["reasoning", "model_choice"]
}

response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    contents=user_input,
    config={
        "system_instruction": CLASSIFIER_SYSTEM_PROMPT,
        "response_mime_type": "application/json",
        "response_json_schema": response_schema,
    },
)
print(response.text)
# {
#   "reasoning": "The user is reporting an error symptom without a known cause. This requires investigation to identify the root cause, which falls under 'Deep Debugging & Root Cause Analysis'.",
#   "model_choice": "pro"
# }

# --- Example 6: raising the thinking level for harder questions ---
response = client.models.generate_content(
    model="gemini-3.1-flash-lite-preview",
    contents="How does AI work?",
    config={
        "thinking_config": {"thinking_level": "high"}
    },
)
print(response.text)
# --- Example 7: asynchronous batch job over an uploaded JSONL request file ---
# Create a JSONL file with your requests and upload it
uploaded_batch_requests = client.files.upload(file="batch_requests.json")

# Create the batch job
batch_job = client.batches.create(
    model="gemini-3.1-flash-lite-preview",
    src=uploaded_batch_requests.name,
    config={'display_name': "batch_job-1"},
)
print(f"Created batch job: {batch_job.name}")

# Wait for up to 24 hours
if batch_job.state.name == 'JOB_STATE_SUCCEEDED':
    result_file_name = batch_job.dest.file_name
    file_content_bytes = client.files.download(file=result_file_name)
    file_content = file_content_bytes.decode('utf-8')
    for line in file_content.splitlines():
        # NOTE: original scrape was missing the closing parenthesis here (syntax error).
        print(line)

# Further reading:
# - Gemini 3.1 Flash-Lite model card
# - Gemini 3 developer guide
# - AI Studio model playground