152 lines
5.4 KiB
Plaintext
152 lines
5.4 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"\n",
|
|
"Running simple query tests with OpenAI:\n",
|
|
"\n",
|
|
"Testing simple queries using openai model:\n",
|
|
"\n",
|
|
"Query: What is the history of Maidstone, England?\n",
|
|
"Sending query to http://localhost:8000/api/langchain/interactive_langgraph_query/query with payload: {'query': 'What is the history of Maidstone, England?', 'model': 'openai'}\n"
|
|
]
|
|
},
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"ERROR:root:Error sending query to http://localhost:8000/api/langchain/interactive_langgraph_query/query: 500 Server Error: Internal Server Error for url: http://localhost:8000/api/langchain/interactive_langgraph_query/query\n"
|
|
]
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Response:\n",
|
|
"{\n",
|
|
" \"error\": \"500 Server Error: Internal Server Error for url: http://localhost:8000/api/langchain/interactive_langgraph_query/query\"\n",
|
|
"}\n",
|
|
"==================================================\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"from dotenv import load_dotenv, find_dotenv\n",
|
|
"load_dotenv(find_dotenv())\n",
|
|
"import os\n",
|
|
"import logging\n",
|
|
"# Function to send a query and get the response\n",
|
|
"import requests\n",
|
|
"import json\n",
|
|
"\n",
|
|
"# Base URL of the FastAPI server under test (see send_query below)\n",
"BASE_URL = \"http://localhost:8000\" # Adjust this if your server is running on a different port or host\n",
"\n",
"# Full URL of the interactive langgraph query endpoint used by send_query()\n",
"ENDPOINT = f\"{BASE_URL}/api/langchain/interactive_langgraph_query/query\"\n",
"\n",
|
|
"def send_query(query, model=\"ollama\", timeout=30):\n",
"    \"\"\"POST a query to the langgraph endpoint and return the parsed JSON reply.\n",
"\n",
"    Args:\n",
"        query: Natural-language question to send.\n",
"        model: Backend model name (e.g. \"ollama\" or \"openai\").\n",
"        timeout: Seconds to wait for the server before giving up.\n",
"\n",
"    Returns:\n",
"        The decoded JSON body on success, or {\"error\": <message>} on any\n",
"        request failure (connection error, timeout, non-2xx status).\n",
"    \"\"\"\n",
"    payload = {\"query\": query, \"model\": model}\n",
"    headers = {\"Content-Type\": \"application/json\"}\n",
"    print(f\"Sending query to {ENDPOINT} with payload: {payload}\")\n",
"\n",
"    try:\n",
"        # timeout keeps the notebook from hanging forever if the server is down\n",
"        response = requests.post(ENDPOINT, json=payload, headers=headers, timeout=timeout)\n",
"        response.raise_for_status()\n",
"        data = response.json()  # decode once instead of parsing the body twice\n",
"        print(f\"Received response from {ENDPOINT}: {data}\")\n",
"        return data\n",
"    except requests.exceptions.RequestException as e:\n",
"        logging.error(f\"Error sending query to {ENDPOINT}: {str(e)}\")\n",
"        return {\"error\": str(e)}\n",
|
|
"\n",
|
|
"def test_simple_queries(model=\"openai\"):\n",
"    \"\"\"Run a small battery of standalone questions through send_query().\n",
"\n",
"    Args:\n",
"        model: Backend to route the queries to (\"ollama\" or \"openai\").\n",
"    \"\"\"\n",
"    simple_questions = [\n",
"        \"What is the history of Maidstone, England?\"\n",
"    ]\n",
"\n",
"    print(f\"Testing simple queries using {model} model:\")\n",
"    for question in simple_questions:\n",
"        print(f\"\\nQuery: {question}\")\n",
"        # send_query prints its own request/response trace before we show the body\n",
"        result = send_query(question, model)\n",
"        print(\"Response:\")\n",
"        print(json.dumps(result, indent=2))\n",
"        print(\"=\" * 50)\n",
|
|
"\n",
|
|
"def test_followup_queries(model=\"openai\"):\n",
"    \"\"\"Exercise queries that may need interactive follow-up information.\n",
"\n",
"    For each seed query, keeps prompting the user (at most three times) while\n",
"    the server reports `needs_more_info`, appending each answer to the query\n",
"    text and re-sending it.\n",
"\n",
"    Args:\n",
"        model: Backend to route the queries to (\"ollama\" or \"openai\").\n",
"    \"\"\"\n",
"    seed_queries = [\n",
"        \"What is the latest local news from a particular town?\"\n",
"    ]\n",
"\n",
"    print(f\"Testing queries requiring follow-up using {model} model:\")\n",
"    for query in seed_queries:\n",
"        print(f\"\\nInitial Query: {query}\")\n",
"        result = send_query(query, model)\n",
"        print(\"Initial Response:\")\n",
"        print(json.dumps(result, indent=2))\n",
"\n",
"        max_rounds = 3\n",
"        rounds = 0\n",
"        while rounds < max_rounds and result.get(\"needs_more_info\", False):\n",
"            extra_info = input(\"Please provide more information: \")\n",
"            result = send_query(f\"{query} {extra_info}\", model)\n",
"            rounds += 1\n",
"            print(f\"\\nFollow-up Response {rounds}:\")\n",
"            print(json.dumps(result, indent=2))\n",
"\n",
"        # Note: hitting the round limit wins even if the last reply resolved.\n",
"        if rounds == max_rounds:\n",
"            print(\"\\nMaximum number of follow-ups reached. Moving to next query.\")\n",
"        elif not result.get(\"needs_more_info\", False):\n",
"            print(\"\\nFinal Response:\")\n",
"            print(json.dumps(result, indent=2))\n",
"\n",
"        print(\"=\" * 50)\n",
|
|
"\n",
|
|
"# Run the tests.\n",
"# Only the OpenAI simple-query suite is active; uncomment the lines below to\n",
"# also exercise the Ollama backend and/or the interactive follow-up suites\n",
"# (the follow-up suites block on input() and need a live kernel).\n",
"#print(\"Running simple query tests with Ollama:\\n\")\n",
"#test_simple_queries(\"ollama\")\n",
"\n",
"print(\"\\nRunning simple query tests with OpenAI:\\n\")\n",
"test_simple_queries(\"openai\")\n",
"\n",
"#print(\"\\nRunning follow-up query tests with Ollama:\\n\")\n",
"#test_followup_queries(\"ollama\")\n",
"\n",
"#print(\"\\nRunning follow-up query tests with OpenAI:\\n\")\n",
"#test_followup_queries(\"openai\")"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.11.9"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 2
|
|
}
|