Guide to setting up and using chat-based testing with LLM websocket servers
```bash
# Clone the repository
git clone https://github.com/vocera-ai/llm-websocket-server-example.git

# Enter the directory
cd llm-websocket-server-example/

# Create a virtual environment
python3 -m venv .venv

# Activate the virtual environment
source .venv/bin/activate

# Install the dependencies
pip install -r requirements.txt
```
In `main.py`, set your OpenAI API key and system prompt:

```python
...
api_key = 'YOUR-OPENAI-API-KEY'
...
SYSTEM_PROMPT = {
    "role": "system",
    "content": """Your system prompt"""
}
...
```
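For orientation, here is a minimal sketch of the kind of server `main.py` implements, assuming the `websockets` and `openai` packages. The handler names, JSON message shape, and model name are illustrative assumptions, not the repository's actual code:

```python
import asyncio
import json

import websockets
from openai import OpenAI

client = OpenAI(api_key="YOUR-OPENAI-API-KEY")  # replace with your key
SYSTEM_PROMPT = {"role": "system", "content": """Your system prompt"""}

async def handle(websocket):
    # Keep one conversation history per connection.
    messages = [SYSTEM_PROMPT]
    async for raw in websocket:
        data = json.loads(raw)  # assumed payload shape: {"content": "..."}
        messages.append({"role": "user", "content": data["content"]})
        completion = client.chat.completions.create(
            model="gpt-4o-mini",  # any chat model works here
            messages=messages,
        )
        reply = completion.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
        await websocket.send(json.dumps({"content": reply}))

async def main():
    # Listen on the same host and port used in the steps below.
    async with websockets.serve(handle, "127.0.0.1", 8765):
        await asyncio.Future()  # run forever

if __name__ == "__main__":
    asyncio.run(main())
```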
Run the server:

```bash
python main.py
```
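Before exposing the server, you can sanity-check it locally with a short client script. This is a sketch that assumes the JSON payload shape used above; adjust it to whatever format the example server actually expects:

```python
import asyncio
import json

import websockets

async def main():
    # Connect to the locally running server.
    async with websockets.connect("ws://127.0.0.1:8765") as ws:
        await ws.send(json.dumps({"content": "Hello, are you there?"}))
        reply = await ws.recv()
        print("server replied:", reply)

asyncio.run(main())
```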
Install and authenticate ngrok so the local server can be reached from outside (the commands below are for Debian/Ubuntu):

```bash
# Install ngrok
curl -sSL https://ngrok-agent.s3.amazonaws.com/ngrok.asc \
  | sudo tee /etc/apt/trusted.gpg.d/ngrok.asc >/dev/null \
  && echo "deb https://ngrok-agent.s3.amazonaws.com buster main" \
  | sudo tee /etc/apt/sources.list.d/ngrok.list \
  && sudo apt update \
  && sudo apt install ngrok

# Authenticate
# Get your token from: https://dashboard.ngrok.com/get-started/your-authtoken
ngrok config add-authtoken <your-token>
```
Expose the websocket server:

```bash
ngrok http 127.0.0.1:8765
```
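ngrok prints a public forwarding URL; use its `wss://` form when configuring the chat-based test. As a quick check, the same client script can be pointed at the tunnel. The hostname below is a placeholder for whatever address ngrok assigned you:

```python
import asyncio
import json

import websockets

async def main():
    # Replace with the forwarding URL ngrok printed; ws:// becomes wss://.
    url = "wss://<your-tunnel-id>.ngrok-free.app"
    async with websockets.connect(url) as ws:
        await ws.send(json.dumps({"content": "ping through the tunnel"}))
        print(await ws.recv())

asyncio.run(main())
```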