> ## Documentation Index
> Fetch the complete documentation index at: https://langwatch.ai/docs/llms.txt
> Use this file to discover all available pages before exploring further.

# Context Precision

> Measures how accurate the retrieval is compared to the expected contexts; increasing it means less noise in the retrieval. Uses traditional string distance metrics.



## OpenAPI

````yaml post /ragas/context_precision/evaluate
openapi: 3.1.0
info:
  title: LangEvals API
  version: 1.0.0
  description: API for LangEvals evaluators
servers:
  - url: https://app.langwatch.ai/api/evaluations
    description: Production server
security:
  - api_key: []
paths:
  /ragas/context_precision/evaluate:
    post:
      summary: Context Precision
      description: >-
        Measures how accurate the retrieval is compared to the expected
        contexts; increasing it means less noise in the retrieval. Uses
        traditional string distance metrics.
      operationId: ragas_context_precision_evaluate
      requestBody:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/ragas_context_precisionRequest'
                - type: object
                  properties:
                    settings:
                      $ref: '#/components/schemas/ragas_context_precisionSettings'
        required: true
      responses:
        '200':
          description: Successful evaluation
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/EvaluationResult'
        '400':
          description: Bad request
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
        '500':
          description: Internal server error
          content:
            application/json:
              schema:
                type: object
                properties:
                  detail:
                    type: string
      x-codeSamples:
        - lang: python
          label: Experiment
          source: |
            import langwatch

            df = langwatch.datasets.get_dataset("dataset-id").to_pandas()

            experiment = langwatch.experiment.init("my-experiment")

            for index, row in experiment.loop(df.iterrows()):
                # your execution code here
                experiment.evaluate(
                    "ragas/context_precision",
                    index=index,
                    data={
                        "contexts": row["contexts"],
                        "expected_contexts": row["expected_contexts"],
                    },
                    settings={}
                )
        - lang: python
          label: Online Evaluation
          source: |-
            import langwatch

            @langwatch.span()
            def my_llm_step():
                ... # your existing code
                result = langwatch.evaluation.evaluate(
                    "ragas/context_precision",
                    name="My Context Precision Check",
                    data={
                        "contexts": "",
                        "expected_contexts": "",
                    },
                    settings={},
                )
                print(result)
        - lang: typescript
          label: Experiment
          source: >-
            import { LangWatch } from "langwatch";


            const langwatch = new LangWatch();


            // Fetch dataset from LangWatch

            const dataset = await langwatch.datasets.get("your-dataset-slug");


            const experiment = await
            langwatch.experiments.init("my-experiment");


            await experiment.run(
              dataset.entries.map((e) => e.entry),
              async ({ item, index }) => {
                // Run your LLM/agent
                const output = await myLLM(item.input);

                // Evaluate the output
                await experiment.evaluate("ragas/context_precision", {
                  index,
                  data: {
                    contexts: item.contexts,
                    expected_contexts: item.expected_contexts,
                  },
                });
              },
              { concurrency: 4 }
            );
        - lang: typescript
          label: Online Evaluation
          source: |-
            import { LangWatch } from "langwatch";

            const langwatch = new LangWatch();

            async function myLLMStep(input: string): Promise<string> {
              // ... your existing code

              // Call the evaluator
              const result = await langwatch.evaluations.evaluate("ragas/context_precision", {
                name: "my-evaluation",
                data: {
                  contexts: "", // your contexts value
                  expected_contexts: "", // your expected_contexts value
                },
                settings: {},
              });

              console.log(result);
              return result;
            }
components:
  schemas:
    ragas_context_precisionRequest:
      type: object
      properties:
        trace_id:
          type: string
          description: Optional trace ID to associate this evaluation with a trace
        data:
          type: object
          properties:
            contexts:
              type: array
              items:
                type: string
              description: Array of context strings used for RAG evaluation
            expected_contexts:
              type: array
              items:
                type: string
              description: The expected contexts for comparison
          required:
            - contexts
            - expected_contexts
      required:
        - data
    ragas_context_precisionSettings:
      type: object
      properties:
        distance_measure:
          type: string
          default: levenshtein
    EvaluationResult:
      type: object
      properties:
        status:
          type: string
          enum:
            - processed
            - skipped
            - error
        score:
          type: number
          description: Numeric score from the evaluation
        passed:
          type: boolean
          description: Whether the evaluation passed
        label:
          type: string
          description: Label assigned by the evaluation
        details:
          type: string
          description: Additional details about the evaluation
        cost:
          type: object
          properties:
            currency:
              type: string
            amount:
              type: number
  securitySchemes:
    api_key:
      type: apiKey
      in: header
      name: X-Auth-Token
      description: API key for authentication

````