
Shield Gemma

any_guardrail.guardrails.shield_gemma.shield_gemma

ShieldGemma

Bases: HuggingFace

Wrapper class for Google ShieldGemma models.

For more information, please visit the model cards: Shield Gemma (https://huggingface.co/collections/google/shieldgemma-67d130ef8da6af884072a789).

Note we do not support the image classifier.

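A minimal usage sketch is shown below. The import path, constructor arguments, and the GuardrailOutput fields (valid, score) come from the source further down this page; the validate call is an assumed entry point inherited from the HuggingFace base class and may differ in your installed version, and the policy text is an illustrative example in the style of the ShieldGemma model-card guidelines.

from any_guardrail.guardrails.shield_gemma.shield_gemma import ShieldGemma

policy = (
    '* "No Harassment": The prompt shall not contain or seek generation of content '
    "that is malicious, intimidating, bullying, or abusive."
)

guardrail = ShieldGemma(policy=policy, model_id="google/shieldgemma-2b")
result = guardrail.validate("Write an insulting message to my coworker.")  # assumed entry point
print(result.valid, result.score)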
Source code in src/any_guardrail/guardrails/shield_gemma/shield_gemma.py
class ShieldGemma(HuggingFace):
    """Wrapper class for Google ShieldGemma models.

    For more information, please visit the model cards: [Shield Gemma](https://huggingface.co/collections/google/shieldgemma-67d130ef8da6af884072a789).

    Note we do not support the image classifier.
    """

    SUPPORTED_MODELS: ClassVar = [
        "google/shieldgemma-2b",
        "google/shieldgemma-9b",
        "google/shieldgemma-27b",
        "hf-internal-testing/tiny-random-Gemma3ForCausalLM",
    ]

    def __init__(self, policy: str, threshold: float = DEFAULT_THRESHOLD, model_id: str | None = None) -> None:
        """Initialize the ShieldGemma guardrail."""
        super().__init__(model_id)
        self.policy = policy
        self.system_prompt = SYSTEM_PROMPT_SHIELD_GEMMA
        self.threshold = threshold

    def _load_model(self) -> None:
        from transformers import AutoModelForCausalLM, AutoTokenizer

        self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)  # type: ignore[no-untyped-call]

    def _pre_processing(self, input_text: str) -> Any:
        formatted_prompt = self.system_prompt.format(user_prompt=input_text, safety_policy=self.policy)
        return super()._pre_processing(formatted_prompt)

    def _post_processing(self, model_outputs: dict[str, Any]) -> GuardrailOutput:
        from torch.nn.functional import softmax

        logits = model_outputs["logits"]
        vocab = self.tokenizer.get_vocab()
        selected_logits = logits[0, -1, [vocab["Yes"], vocab["No"]]]
        probabilities = softmax(selected_logits, dim=0)
        score = probabilities[0].item()
        return GuardrailOutput(valid=score < self.threshold, explanation=None, score=score)
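The decision rule in _post_processing takes the logits at the final position, restricted to the vocabulary ids of "Yes" and "No", applies a softmax, and treats the probability of "Yes" as the violation score; the input is considered valid when that score stays below the threshold. The standalone sketch below mirrors that arithmetic with placeholder logits and a placeholder threshold of 0.5 (DEFAULT_THRESHOLD is not shown in this excerpt).

import torch
from torch.nn.functional import softmax

# Stand-in for logits[0, -1, [vocab["Yes"], vocab["No"]]] from the source above.
selected_logits = torch.tensor([1.2, 3.4])

probabilities = softmax(selected_logits, dim=0)
score = probabilities[0].item()  # probability of "Yes", i.e. a policy violation
valid = score < 0.5              # placeholder threshold; the class uses self.threshold
print(valid, score)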
__init__(policy, threshold=DEFAULT_THRESHOLD, model_id=None)

Initialize the ShieldGemma guardrail.

Source code in src/any_guardrail/guardrails/shield_gemma/shield_gemma.py
def __init__(self, policy: str, threshold: float = DEFAULT_THRESHOLD, model_id: str | None = None) -> None:
    """Initialize the ShieldGemma guardrail."""
    super().__init__(model_id)
    self.policy = policy
    self.system_prompt = SYSTEM_PROMPT_SHIELD_GEMMA
    self.threshold = threshold
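Parameters: policy is the free-text safety policy that _pre_processing substitutes into the system prompt as safety_policy; threshold is the cutoff applied to the violation probability in _post_processing; model_id selects one of the SUPPORTED_MODELS listed above. An illustrative constructor call follows; the policy wording is an example rather than the official model-card text, and 0.5 is an arbitrary illustrative threshold.

policy = (
    '* "No Dangerous Content": The prompt shall not contain or seek generation of '
    "content that could cause harm to oneself or others."
)
guardrail = ShieldGemma(policy=policy, threshold=0.5, model_id="google/shieldgemma-2b")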