> ## Documentation Index
> Fetch the complete documentation index at: https://langwatch.ai/docs/llms.txt
> Use this file to discover all available pages before exploring further.

# Ragas Context Relevancy

> This metric gauges the relevancy of the retrieved context, calculated based on both the question and contexts. The values fall within the range of (0, 1), with higher values indicating better relevancy.



## OpenAPI

````yaml post /legacy/ragas_context_relevancy/evaluate
openapi: 3.1.0
info:
  title: LangEvals API
  version: 1.0.0
  description: API for LangEvals evaluators
servers:
  - url: https://app.langwatch.ai/api/evaluations
    description: Production server
security:
  - api_key: []
paths:
  /legacy/ragas_context_relevancy/evaluate:
    post:
      summary: Ragas Context Relevancy
      description: >-
        This metric gauges the relevancy of the retrieved context, calculated
        based on both the question and contexts. The values fall within the
        range of (0, 1), with higher values indicating better relevancy.
      operationId: legacy_ragas_context_relevancy_evaluate
      requestBody:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/legacy_ragas_context_relevancyRequest'
                - type: object
                  properties:
                    settings:
                      $ref: >-
                        #/components/schemas/legacy_ragas_context_relevancySettings
        required: true
      responses:
        '200':
          description: Successful evaluation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/EvaluationResult'
        '400':
          description: Bad request
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
      x-codeSamples:
        - lang: python
          label: Experiment
          source: |
            import langwatch

            df = langwatch.datasets.get_dataset("dataset-id").to_pandas()

            experiment = langwatch.experiment.init("my-experiment")

            for index, row in experiment.loop(df.iterrows()):
                # your execution code here
                experiment.evaluate(
                    "legacy/ragas_context_relevancy",
                    index=index,
                    data={
                        "output": output,
                        "contexts": row["contexts"],
                    },
                    settings={}
                )
        - lang: python
          label: Online Evaluation
          source: |-
            import langwatch

            @langwatch.span()
            def my_llm_step():
                ... # your existing code
                result = langwatch.evaluation.evaluate(
                    "legacy/ragas_context_relevancy",
                    name="My Ragas Context Relevancy Check",
                    data={
                        "output": "",
                        "contexts": "",
                    },
                    settings={},
                )
                print(result)
        - lang: typescript
          label: Experiment
          source: >-
            import { LangWatch } from "langwatch";


            const langwatch = new LangWatch();


            // Fetch dataset from LangWatch

            const dataset = await langwatch.datasets.get("your-dataset-slug");


            const experiment = await
            langwatch.experiments.init("my-experiment");


            await experiment.run(
              dataset.entries.map((e) => e.entry),
              async ({ item, index }) => {
                // Run your LLM/agent
                const output = await myLLM(item.input);

                // Evaluate the output
                await experiment.evaluate("legacy/ragas_context_relevancy", {
                  index,
                  data: {
                    output: output,
                    contexts: item.contexts,
                  },
                });
              },
              { concurrency: 4 }
            );
        - lang: typescript
          label: Online Evaluation
          source: |-
            import { LangWatch } from "langwatch";

            const langwatch = new LangWatch();

            async function myLLMStep(input: string) {
              // ... your existing code

              // Call the evaluator
              const result = await langwatch.evaluations.evaluate("legacy/ragas_context_relevancy", {
                name: "my-evaluation",
                data: {
                  output: "", // your output value
                  contexts: [], // your contexts value (array of strings)
                },
                settings: {},
              });

              console.log(result);
              return result;
            }
components:
  schemas:
    legacy_ragas_context_relevancyRequest:
      type: object
      properties:
        trace_id:
          type: string
          description: Optional trace ID to associate this evaluation with a trace
        data:
          type: object
          properties:
            output:
              type: string
              description: The output/response text to evaluate
            contexts:
              type: array
              items:
                type: string
              description: Array of context strings used for RAG evaluation
          required:
            - output
            - contexts
      required:
        - data
    legacy_ragas_context_relevancySettings:
      type: object
      properties:
        model:
          description: The model to use for evaluation.
          type: string
          default: openai/gpt-5
        embeddings_model:
          description: The model to use for embeddings.
          type: string
          default: openai/text-embedding-ada-002
        max_tokens:
          description: >-
            The maximum number of tokens allowed for evaluation, a too high
            number can be costly. Entries above this amount will be skipped.
          type: number
          default: 2048
    EvaluationResult:
      type: object
      properties:
        status:
          type: string
          enum:
            - processed
            - skipped
            - error
        score:
          type: number
          description: Numeric score from the evaluation
        passed:
          type: boolean
          description: Whether the evaluation passed
        label:
          type: string
          description: Label assigned by the evaluation
        details:
          type: string
          description: Additional details about the evaluation
        cost:
          type: object
          properties:
            currency:
              type: string
            amount:
              type: number
  securitySchemes:
    api_key:
      type: apiKey
      in: header
      name: X-Auth-Token
      description: API key for authentication

````