providerplaneai
    Preparing search index...

    Interface contract for SafetyRating.

    interface SafetyRating {
        categories?: {
            harassment?: boolean;
            hate?: boolean;
            selfHarm?: boolean;
            sexual?: boolean;
            violence?: boolean;
        };
        flagged?: boolean;
        level?: string;
        provider?: AIProviderType;
        raw?: unknown;
        reason?: string;
        score?: number;
        subcategory?: string;
    }
    Index

    Properties

    categories?: {
        harassment?: boolean;
        hate?: boolean;
        selfHarm?: boolean;
        sexual?: boolean;
        violence?: boolean;
    }

    Normalized per-category boolean flags (keys: "harassment", "hate", "selfHarm", "sexual", "violence")

    flagged?: boolean
    level?: string

    Severity or confidence

    provider?: AIProviderType

    Provider that emitted this rating

    raw?: unknown

    Provider-specific raw payload

    reason?: string

    Optional human-readable explanation or rationale

    score?: number

    Numeric confidence if available (0–1)

    subcategory?: string

    Optional subcategory