Generate text with safety settings

This sample demonstrates how to generate text using the Gemini model with safety settings applied to the request.

Explore further

For detailed documentation that includes this code sample, see the following:

Code sample

Go

Before trying this sample, follow the Go setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Go API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/genai"
)

// generateTextWithSafety shows how to apply safety settings to a text generation request.
func generateTextWithSafety(w io.Writer) error {
	ctx := context.Background()

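	// No backend, project, or location is set here, so the client is
	// expected to pick them up from the environment (for example,
	// GOOGLE_GENAI_USE_VERTEXAI, GOOGLE_CLOUD_PROJECT, and
	// GOOGLE_CLOUD_LOCATION).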
	client, err := genai.NewClient(ctx, &genai.ClientConfig{
		HTTPOptions: genai.HTTPOptions{APIVersion: "v1"},
	})
	if err != nil {
		return fmt.Errorf("failed to create genai client: %w", err)
	}

	systemInstruction := &genai.Content{
		Parts: []*genai.Part{
			{Text: "Be as mean as possible."},
		},
		Role: genai.RoleUser,
	}

	prompt := "Write a list of 5 disrespectful things that I might say to the universe after stubbing my toe in the dark."

	safetySettings := []*genai.SafetySetting{
		{Category: genai.HarmCategoryDangerousContent, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategoryHarassment, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategoryHateSpeech, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
		{Category: genai.HarmCategorySexuallyExplicit, Threshold: genai.HarmBlockThresholdBlockLowAndAbove},
	}

	config := &genai.GenerateContentConfig{
		SystemInstruction: systemInstruction,
		SafetySettings:    safetySettings,
	}
	modelName := "gemini-2.5-flash"
	resp, err := client.Models.GenerateContent(ctx, modelName,
		[]*genai.Content{{Parts: []*genai.Part{{Text: prompt}}, Role: genai.RoleUser}},
		config,
	)
	if err != nil {
		return fmt.Errorf("failed to generate content: %w", err)
	}

	fmt.Fprintln(w, resp.Text())

	if len(resp.Candidates) > 0 {
		fmt.Fprintln(w, "Finish Reason:", resp.Candidates[0].FinishReason)

		for _, rating := range resp.Candidates[0].SafetyRatings {
			fmt.Fprintf(w, "\nCategory: %v\nIs Blocked: %v\nProbability: %v\nProbability Score: %v\nSeverity: %v\nSeverity Score: %v\n",
				rating.Category,
				rating.Blocked,
				rating.Probability,
				rating.ProbabilityScore,
				rating.Severity,
				rating.SeverityScore,
			)
		}
	}

	// Example response:
	// Category: HARM_CATEGORY_HATE_SPEECH
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 8.996795e-06
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.04771039
	//
	// Category: HARM_CATEGORY_DANGEROUS_CONTENT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 2.2431707e-06
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0
	//
	// Category: HARM_CATEGORY_HARASSMENT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 0.00026123362
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.022358216
	//
	// Category: HARM_CATEGORY_SEXUALLY_EXPLICIT
	// Is Blocked: false
	// Probability: NEGLIGIBLE
	// Probability Score: 6.1352006e-07
	// Severity: HARM_SEVERITY_NEGLIGIBLE
	// Severity Score: 0.020111412

	return nil
}

Java

Before trying this sample, follow the Java setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Java API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.


import com.google.genai.Client;
import com.google.genai.types.Candidate;
import com.google.genai.types.Content;
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.HarmBlockThreshold;
import com.google.genai.types.HarmCategory;
import com.google.genai.types.HttpOptions;
import com.google.genai.types.Part;
import com.google.genai.types.SafetySetting;
import java.util.List;
import java.util.stream.Collectors;

public class SafetyWithTxt {

  public static void main(String[] args) {
    // TODO(developer): Replace these variables before running the sample.
    String modelId = "gemini-2.5-flash";
    generateContent(modelId);
  }

  // Shows how to generate content with safety settings.
  public static GenerateContentResponse generateContent(String modelId) {
    // Initialize the client. Once created, it can be reused for multiple requests.
    try (Client client =
        Client.builder()
            .location("global")
            .vertexAI(true)
            .httpOptions(HttpOptions.builder().apiVersion("v1").build())
            .build()) {

      String systemInstruction = "Be as mean as possible.";

      String prompt =
          "Write a list of 5 disrespectful things that I might say"
              + " to the universe after stubbing my toe in the dark.";

      // Set safety settings.
      List<HarmCategory.Known> categoriesToBlock =
          List.of(
              HarmCategory.Known.HARM_CATEGORY_DANGEROUS_CONTENT,
              HarmCategory.Known.HARM_CATEGORY_HARASSMENT,
              HarmCategory.Known.HARM_CATEGORY_HATE_SPEECH,
              HarmCategory.Known.HARM_CATEGORY_SEXUALLY_EXPLICIT);

      List<SafetySetting> safetySettings =
          categoriesToBlock.stream()
              .map(
                  category ->
                      SafetySetting.builder()
                          .category(category)
                          .threshold(HarmBlockThreshold.Known.BLOCK_LOW_AND_ABOVE)
                          .build())
              .collect(Collectors.toList());

      GenerateContentResponse response =
          client.models.generateContent(
              modelId,
              prompt,
              GenerateContentConfig.builder()
                  .systemInstruction(Content.fromParts(Part.fromText(systemInstruction)))
                  .safetySettings(safetySettings)
                  .build());

      // Get response candidate.
      Candidate candidate =
          response
              .candidates()
              .flatMap(candidates -> candidates.stream().findFirst())
              .orElseThrow(
                  () -> new IllegalStateException("No response candidate generated by the model."));

      // Finish Reason will be `SAFETY` if it is blocked.
      System.out.println(candidate.finishReason());
      // Example response:
      // Optional[SAFETY]

      // Print details for all the fields in the safety ratings.
      candidate
          .safetyRatings()
          .ifPresent(
              safetyRatings ->
                  safetyRatings.forEach(
                      safetyRating -> {
                        System.out.println("\nCategory: " + safetyRating.category());
                        System.out.println("Is Blocked: " + safetyRating.blocked());
                        System.out.println("Probability: " + safetyRating.probability());
                        System.out.println("Probability Score: " + safetyRating.probabilityScore());
                        System.out.println("Severity: " + safetyRating.severity());
                        System.out.println("Severity Score: " + safetyRating.severityScore());
                      }));
      // Example response:
      // Category: Optional[HARM_CATEGORY_HATE_SPEECH]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[1.9967922E-5]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.05732864]
      //
      // Category: Optional[HARM_CATEGORY_DANGEROUS_CONTENT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[2.9124324E-6]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.04544826]
      //
      // Category: Optional[HARM_CATEGORY_HARASSMENT]
      // Is Blocked: Optional[true]
      // Probability: Optional[MEDIUM]
      // Probability Score: Optional[0.4593908]
      // Severity: Optional[HARM_SEVERITY_MEDIUM]
      // Severity Score: Optional[0.22082388]
      //
      // Category: Optional[HARM_CATEGORY_SEXUALLY_EXPLICIT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[6.453211E-8]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.023201048]
      return response;
    }
  }
}

Node.js

Before trying this sample, follow the Node.js setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Node.js API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.

const {GoogleGenAI} = require('@google/genai');

const GOOGLE_CLOUD_PROJECT = process.env.GOOGLE_CLOUD_PROJECT;
const GOOGLE_CLOUD_LOCATION = process.env.GOOGLE_CLOUD_LOCATION || 'global';

async function generateWithSafetySettings(
  projectId = GOOGLE_CLOUD_PROJECT,
  location = GOOGLE_CLOUD_LOCATION
) {
  const client = new GoogleGenAI({
    vertexai: true,
    project: projectId,
    location: location,
  });

  const systemInstruction = 'Be as mean as possible.';

  const prompt =
    'Write a list of 5 disrespectful things that I might say to the universe after stubbing my toe in the dark.';

  const safetySettings = [
    {
      category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
      threshold: 'BLOCK_LOW_AND_ABOVE',
    },
    {
      category: 'HARM_CATEGORY_HARASSMENT',
      threshold: 'BLOCK_LOW_AND_ABOVE',
    },
    {
      category: 'HARM_CATEGORY_HATE_SPEECH',
      threshold: 'BLOCK_LOW_AND_ABOVE',
    },
    {
      category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
      threshold: 'BLOCK_LOW_AND_ABOVE',
    },
  ];

  const response = await client.models.generateContent({
    model: 'gemini-2.5-flash',
    contents: prompt,
    config: {
      systemInstruction: systemInstruction,
      safetySettings: safetySettings,
    },
  });

  // Response text will be empty if the response is blocked.
  console.log(response.text);

  // Finish reason will be 'SAFETY' if the response is blocked.
  console.log(response.candidates[0].finishReason);

  for (const rating of response.candidates[0].safetyRatings) {
    console.log('\nCategory:', String(rating.category));
    console.log('Is Blocked:', Boolean(rating.blocked));
    console.log('Probability:', rating.probability);
    console.log('Probability Score:', rating.probabilityScore);
    console.log('Severity:', rating.severity);
    console.log('Severity Score:', rating.severityScore);
  }

  // Example response:
  //
  //     Category: HARM_CATEGORY_HATE_SPEECH
  //     Is Blocked: false
  //     Probability: NEGLIGIBLE
  //     Probability Score: 2.547714e-05
  //     Severity: HARM_SEVERITY_NEGLIGIBLE
  //     Severity Score: undefined
  //
  //     Category: HARM_CATEGORY_DANGEROUS_CONTENT
  //     Is Blocked: false
  //     Probability: NEGLIGIBLE
  //     Probability Score: 3.6103818e-06
  //     Severity: HARM_SEVERITY_NEGLIGIBLE
  //     Severity Score: undefined
  //
  //     Category: HARM_CATEGORY_HARASSMENT
  //     Is Blocked: true
  //     Probability: MEDIUM
  //     Probability Score: 0.71599233
  //     Severity: HARM_SEVERITY_MEDIUM
  //     Severity Score: 0.30782545
  //
  //     Category: HARM_CATEGORY_SEXUALLY_EXPLICIT
  //     Is Blocked: false
  //     Probability: NEGLIGIBLE
  //     Probability Score: 1.5624657e-05
  //     Severity: HARM_SEVERITY_NEGLIGIBLE
  //     Severity Score: undefined

  return response;
}

Python

Before trying this sample, follow the Python setup instructions in the Vertex AI quickstart using client libraries. For more information, see the Vertex AI Python API reference documentation.

To authenticate to Vertex AI, set up Application Default Credentials. For more information, see Set up authentication for a local development environment.
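
If you prefer not to rely on environment variables for project and location, the client can also be configured explicitly. A minimal sketch, assuming Application Default Credentials are already set up ("your-project-id" is a placeholder, not a value from this sample):

from google import genai
from google.genai.types import HttpOptions

# Explicit Vertex AI configuration instead of environment variables such as
# GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION.
# "your-project-id" is a placeholder.
client = genai.Client(
    vertexai=True,
    project="your-project-id",
    location="global",
    http_options=HttpOptions(api_version="v1"),
)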

from google import genai
from google.genai.types import (
    GenerateContentConfig,
    HarmBlockThreshold,
    HarmCategory,
    HttpOptions,
    SafetySetting,
)

client = genai.Client(http_options=HttpOptions(api_version="v1"))

system_instruction = "Be as mean as possible."

prompt = """
    Write a list of 5 disrespectful things that I might say to the universe after stubbing my toe in the dark.
"""

safety_settings = [
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
    SafetySetting(
        category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
        threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
    ),
]

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=prompt,
    config=GenerateContentConfig(
        system_instruction=system_instruction,
        safety_settings=safety_settings,
    ),
)

# Response will be `None` if it is blocked.
print(response.text)
# Example response:
#     None

# Finish Reason will be `SAFETY` if it is blocked.
print(response.candidates[0].finish_reason)
# Example response:
#     FinishReason.SAFETY

# Print details for all the fields in the safety ratings.
for each in response.candidates[0].safety_ratings:
    print('\nCategory: ', str(each.category))
    print('Is Blocked:', bool(each.blocked))
    print('Probability: ', each.probability)
    print('Probability Score: ', each.probability_score)
    print('Severity:', each.severity)
    print('Severity Score:', each.severity_score)
# Example response:
#
#     Category:  HarmCategory.HARM_CATEGORY_HATE_SPEECH
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  2.547714e-05
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
#
#     Category:  HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  3.6103818e-06
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
#
#     Category:  HarmCategory.HARM_CATEGORY_HARASSMENT
#     Is Blocked: True
#     Probability:  HarmProbability.MEDIUM
#     Probability Score:  0.71599233
#     Severity: HarmSeverity.HARM_SEVERITY_MEDIUM
#     Severity Score: 0.30782545
#
#     Category:  HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
#     Is Blocked: False
#     Probability:  HarmProbability.NEGLIGIBLE
#     Probability Score:  1.5624657e-05
#     Severity: HarmSeverity.HARM_SEVERITY_NEGLIGIBLE
#     Severity Score: None
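
A response at this threshold can come back blocked, so calling code should branch on the finish reason rather than assume text is present. The following is a minimal sketch of one way to handle that, reusing the client, prompt, safety_settings, and GenerateContentConfig from the sample above; the branching logic is illustrative and not part of the original sample:

from google.genai.types import FinishReason

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=prompt,
    config=GenerateContentConfig(safety_settings=safety_settings),
)

candidate = response.candidates[0]
if candidate.finish_reason == FinishReason.SAFETY:
    # Illustrative handling: report which categories were flagged as blocked.
    blocked = [r.category for r in (candidate.safety_ratings or []) if r.blocked]
    print("Response blocked by safety filters:", blocked)
else:
    print(response.text)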

What's next

To search and filter code samples for other Google Cloud products, see the Google Cloud sample browser.