
# OpenAI Moderation

> This evaluator uses OpenAI's moderation API to detect potentially harmful content in text,
> including harassment, hate speech, self-harm, sexual content, and violence.
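
For ad-hoc calls outside the SDKs, the evaluator is also reachable over plain HTTP, as described by the OpenAPI spec below: a `POST` to `/openai/moderation/evaluate` on the production server, authenticated via the `X-Auth-Token` header. Here is a minimal sketch in Python using `requests`; the `LANGWATCH_API_KEY` environment variable name and the example texts are assumptions, not part of the spec.

```python
import os

import requests

# Endpoint, header name, and payload shape come from the OpenAPI spec below.
# NOTE: the LANGWATCH_API_KEY variable name is an assumption for this sketch.
response = requests.post(
    "https://app.langwatch.ai/api/evaluations/openai/moderation/evaluate",
    headers={"X-Auth-Token": os.environ["LANGWATCH_API_KEY"]},
    json={
        "data": {
            "input": "How do I bake a cake?",
            "output": "Preheat the oven to 180C and mix the batter.",
        },
        # Empty settings fall back to the documented defaults:
        # text-moderation-stable with all categories enabled.
        "settings": {},
    },
)
response.raise_for_status()

# A 200 response is an array of EvaluationResult objects (see schema below).
for result in response.json():
    print(result["status"], result.get("passed"), result.get("details"))
```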



## OpenAPI

````yaml post /openai/moderation/evaluate
openapi: 3.1.0
info:
  title: LangEvals API
  version: 1.0.0
  description: API for LangEvals evaluators
servers:
  - url: https://app.langwatch.ai/api/evaluations
    description: Production server
security:
  - api_key: []
paths:
  /openai/moderation/evaluate:
    post:
      summary: OpenAI Moderation
      description: >-
        This evaluator uses OpenAI's moderation API to detect potentially
        harmful content in text,

        including harassment, hate speech, self-harm, sexual content, and
        violence.
      operationId: openai_moderation_evaluate
      requestBody:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/openai_moderationRequest'
                - type: object
                  properties:
                    settings:
                      $ref: '#/components/schemas/openai_moderationSettings'
        required: true
      responses:
        '200':
          description: Successful evaluation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/EvaluationResult'
        '400':
          description: Bad request
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
      x-codeSamples:
        - lang: python
          label: Experiment
          source: |
            import langwatch

            df = langwatch.datasets.get_dataset("dataset-id").to_pandas()

            experiment = langwatch.experiment.init("my-experiment")

            for index, row in experiment.loop(df.iterrows()):
                output = ...  # your execution code here; produce the model's output
                experiment.evaluate(
                    "openai/moderation",
                    index=index,
                    data={
                        "input": row["input"],
                        "output": output,
                    },
                    settings={}
                )
        - lang: python
          label: Online Evaluation
          source: |-
            import langwatch

            @langwatch.span()
            def my_llm_step():
                ... # your existing code
                result = langwatch.evaluation.evaluate(
                    "openai/moderation",
                    name="My OpenAI Moderation Check",
                    data={
                        "input": "",   # your input value
                        "output": "",  # your output value
                    },
                    settings={},
                )
                print(result)
        - lang: typescript
          label: Experiment
          source: |-
            import { LangWatch } from "langwatch";

            const langwatch = new LangWatch();

            // Fetch dataset from LangWatch
            const dataset = await langwatch.datasets.get("your-dataset-slug");

            const experiment = await langwatch.experiments.init("my-experiment");

            await experiment.run(
              dataset.entries.map((e) => e.entry),
              async ({ item, index }) => {
                // Run your LLM/agent
                const output = await myLLM(item.input);

                // Evaluate the output
                await experiment.evaluate("openai/moderation", {
                  index,
                  data: {
                    input: item.input,
                    output: output,
                  },
                });
              },
              { concurrency: 4 }
            );
        - lang: typescript
          label: Online Evaluation
          source: |-
            import { LangWatch } from "langwatch";

            const langwatch = new LangWatch();

            async function myLLMStep(input: string): Promise<string> {
              const output = "..."; // ... your existing code produces this value

              // Call the evaluator
              const result = await langwatch.evaluations.evaluate("openai/moderation", {
                name: "my-evaluation",
                data: {
                  input,  // the input under evaluation
                  output, // the output under evaluation
                },
                settings: {},
              });

              console.log(result);
              return output;
            }
components:
  schemas:
    openai_moderationRequest:
      type: object
      properties:
        trace_id:
          type: string
          description: Optional trace ID to associate this evaluation with a trace
        data:
          type: object
          properties:
            input:
              type: string
              description: The input text to evaluate
            output:
              type: string
              description: The output/response text to evaluate
          required: []
      required:
        - data
    openai_moderationSettings:
      type: object
      properties:
        model:
          description: >-
            The model version to use: `text-moderation-latest` is upgraded
            automatically over time, while `text-moderation-stable` is only
            updated with advance notice from OpenAI.
          type: string
          default: text-moderation-stable
        categories:
          description: The categories of content to check for moderation.
          type: object
          default:
            harassment: true
            harassment_threatening: true
            hate: true
            hate_threatening: true
            self_harm: true
            self_harm_instructions: true
            self_harm_intent: true
            sexual: true
            sexual_minors: true
            violence: true
            violence_graphic: true
    EvaluationResult:
      type: object
      properties:
        status:
          type: string
          enum:
            - processed
            - skipped
            - error
        score:
          type: number
          description: Numeric score from the evaluation
        passed:
          type: boolean
          description: Whether the evaluation passed
        label:
          type: string
          description: Label assigned by the evaluation
        details:
          type: string
          description: Additional details about the evaluation
        cost:
          type: object
          properties:
            currency:
              type: string
            amount:
              type: number
  securitySchemes:
    api_key:
      type: apiKey
      in: header
      name: X-Auth-Token
      description: API key for authentication

````
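
The `settings` object accepts a `model` and a `categories` map, as defined in `openai_moderationSettings` above. The sketch below narrows moderation to violence- and threat-related categories; the spec does not state how a partially specified `categories` object is merged with the defaults, so every key is set explicitly here.

```python
# Keys mirror the openai_moderationSettings schema defaults above.
# Assumption: merge behavior for partial category maps is unspecified,
# so all eleven categories are set explicitly.
settings = {
    "model": "text-moderation-stable",
    "categories": {
        "harassment": False,
        "harassment_threatening": True,
        "hate": False,
        "hate_threatening": True,
        "self_harm": False,
        "self_harm_instructions": False,
        "self_harm_intent": False,
        "sexual": False,
        "sexual_minors": True,
        "violence": True,
        "violence_graphic": True,
    },
}
```

This dict can be passed wherever the samples above accept a `settings` argument, e.g. `experiment.evaluate("openai/moderation", ..., settings=settings)`.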