import asyncio  # the example uses async functions and an event loop

import langwatch
from langwatch.types import RAGChunk
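
# Assumes LANGWATCH_API_KEY is set in the environment before setup() is called.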
langwatch.setup()


async def rag_retrieval_manual(query: str):
    # Use async with for the span, instead of a decorator
    async with langwatch.span(type="rag", name="RAG Document Retrieval") as span:
        # ... your async retrieval logic ...
        await asyncio.sleep(0.05)  # Simulate async work
        search_results = [
            {"id": "doc-1", "content": "Content for doc 1."},
            {"id": "doc-2", "content": "Content for doc 2."},
        ]

        # Update the span with input, contexts, output, and a custom attribute
        span.update(
            input=query,
            contexts=[
                RAGChunk(document_id=doc["id"], content=doc["content"])
                for doc in search_results
            ],
            output=search_results,
            strategy="manual_vector_search",
        )
        return search_results
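
# Note: extra keyword arguments to span.update() (such as `strategy` above) are
# assumed here to be recorded as custom attributes on the span.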


async def handle_user_query_manual(query: str):
    # Use async with for the trace
    async with langwatch.trace(
        name="Manual User Query Handling",
        metadata={"user_id": "manual-user", "query": query},
    ) as trace:
        # Call the manually instrumented RAG function
        retrieved_docs = await rag_retrieval_manual(query)

        # --- Simulate the LLM call step (manual span) ---
        llm_response = ""
        async with langwatch.span(type="llm", name="Manual LLM Generation") as llm_span:
            llm_input = {"role": "user", "content": f"Context: {retrieved_docs}\nQuery: {query}"}
            llm_metadata = {"model_name": "gpt-5"}

            # ... your async LLM call logic ...
            await asyncio.sleep(0.1)
            llm_response = "This is the manual LLM response."
            llm_output = {"role": "assistant", "content": llm_response}

            # Set input, output, and metadata via update
            llm_span.update(
                input=llm_input,
                output=llm_output,
                metadata=llm_metadata,
            )

        # Set the final trace output via update
        trace.update(output=llm_response)
        return llm_response
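
# Note: spans opened inside the trace context (including the one inside
# rag_retrieval_manual) attach to the current trace via context propagation,
# so no trace object needs to be passed around.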


# Example execution (in an async context)
async def main():
    result = await handle_user_query_manual("Tell me about manual tracing with context managers.")
    print(result)


if __name__ == "__main__":
    asyncio.run(main())
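
# Expected output:
#   This is the manual LLM response.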