> ## Documentation Index
> Fetch the complete documentation index at: https://langwatch.ai/docs/llms.txt
> Use this file to discover all available pages before exploring further.

# Ragas Response Context Recall

> Uses an LLM to measure how many of the relevant documents attributable to the claims in the output were successfully retrieved in order to generate the expected output.



## OpenAPI

````yaml post /ragas/response_context_recall/evaluate
openapi: 3.1.0
info:
  title: LangEvals API
  version: 1.0.0
  description: API for LangEvals evaluators
servers:
  - url: https://app.langwatch.ai/api/evaluations
    description: Production server
security:
  - api_key: []
paths:
  /ragas/response_context_recall/evaluate:
    post:
      summary: Ragas Response Context Recall
      description: >-
        Uses an LLM to measure how many of the relevant documents attributable
        to the claims in the output were successfully retrieved in order to
        generate the expected output.
      operationId: ragas_response_context_recall_evaluate
      requestBody:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/ragas_response_context_recallRequest'
                - type: object
                  properties:
                    settings:
                      $ref: >-
                        #/components/schemas/ragas_response_context_recallSettings
        required: true
      responses:
        '200':
          description: Successful evaluation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/EvaluationResult'
        '400':
          description: Bad request
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
      x-codeSamples:
        - lang: python
          label: Experiment
          source: |
            import langwatch

            df = langwatch.datasets.get_dataset("dataset-id").to_pandas()

            experiment = langwatch.experiment.init("my-experiment")

            for index, row in experiment.loop(df.iterrows()):
                # your execution code here
                experiment.evaluate(
                    "ragas/response_context_recall",
                    index=index,
                    data={
                        "input": row["input"],
                        "output": output,
                        "contexts": row["contexts"],
                        "expected_output": row["expected_output"],
                    },
                    settings={}
                )
        - lang: python
          label: Online Evaluation
          source: |-
            import langwatch

            @langwatch.span()
            def my_llm_step():
                ... # your existing code
                result = langwatch.evaluation.evaluate(
                    "ragas/response_context_recall",
                    name="My Ragas Response Context Recall Check",
                    data={
                        "input": "",
                        "output": "",
                        "contexts": [],
                        "expected_output": "",
                    },
                    settings={},
                )
                print(result)
        - lang: typescript
          label: Experiment
          source: >-
            import { LangWatch } from "langwatch";


            const langwatch = new LangWatch();


            // Fetch dataset from LangWatch

            const dataset = await langwatch.datasets.get("your-dataset-slug");


            const experiment = await
            langwatch.experiments.init("my-experiment");


            await experiment.run(
              dataset.entries.map((e) => e.entry),
              async ({ item, index }) => {
                // Run your LLM/agent
                const output = await myLLM(item.input);

                // Evaluate the output
                await experiment.evaluate("ragas/response_context_recall", {
                  index,
                  data: {
                    input: item.input,
                    output: output,
                    contexts: item.contexts,
                    expected_output: item.expected_output,
                  },
                });
              },
              { concurrency: 4 }
            );
        - lang: typescript
          label: Online Evaluation
          source: |-
            import { LangWatch } from "langwatch";

            const langwatch = new LangWatch();

            async function myLLMStep(input: string): Promise<string> {
              // ... your existing code

              // Call the evaluator
              const result = await langwatch.evaluations.evaluate("ragas/response_context_recall", {
                name: "my-evaluation",
                data: {
                  input: "", // your input value
                  output: "", // your output value
                  contexts: [], // your contexts value
                  expected_output: "", // your expected_output value
                },
                settings: {},
              });

              console.log(result);
              return result;
            }
components:
  schemas:
    ragas_response_context_recallRequest:
      type: object
      properties:
        trace_id:
          type: string
          description: Optional trace ID to associate this evaluation with a trace
        data:
          type: object
          properties:
            input:
              type: string
              description: The input text to evaluate
            output:
              type: string
              description: The output/response text to evaluate
            contexts:
              type: array
              items:
                type: string
              description: Array of context strings used for RAG evaluation
            expected_output:
              type: string
              description: The expected output for comparison
          required:
            - input
            - output
            - contexts
            - expected_output
      required:
        - data
    ragas_response_context_recallSettings:
      type: object
      properties:
        model:
          description: The model to use for evaluation.
          type: string
          default: openai/gpt-5
        max_tokens:
          description: >-
            The maximum number of tokens allowed for evaluation, a too high
            number can be costly. Entries above this amount will be skipped.
          type: number
          default: 2048
    EvaluationResult:
      type: object
      properties:
        status:
          type: string
          enum:
            - processed
            - skipped
            - error
        score:
          type: number
          description: Numeric score from the evaluation
        passed:
          type: boolean
          description: Whether the evaluation passed
        label:
          type: string
          description: Label assigned by the evaluation
        details:
          type: string
          description: Additional details about the evaluation
        cost:
          type: object
          properties:
            currency:
              type: string
            amount:
              type: number
  securitySchemes:
    api_key:
      type: apiKey
      in: header
      name: X-Auth-Token
      description: API key for authentication

````