Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 10 additions & 9 deletions docs/source/quick_start.rst
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,15 @@ To use this library, you must have an api key. You can set it two ways: as an en
from predictionguard import PredictionGuard


# You can set you Prediction Guard API Key as an env variable,
# or when creating the client object
os.environ["PREDICTIONGUARD_API_KEY"]
# Set your Prediction Guard token and url as an environment variable.
os.environ["PREDICTIONGUARD_API_KEY"] = "<api key>"
os.environ["PREDICTIONGUARD_URL"] = "<url>"

client = PredictionGuard(
api_key="<your Prediction Guard API Key>"
)
# Or set your Prediction Guard token and url when initializing the PredictionGuard class.
client = PredictionGuard(
    api_key="<api_key>",
    url="<url>"
)

messages = [
{
Expand All @@ -36,9 +38,8 @@ To use this library, you must have an api key. You can set it two ways: as an en
]

result = client.chat.completions.create(
model="Hermes-2-Pro-Llama-3-8B",
messages=messages,
max_tokens=100
model="Hermes-3-Llama-3.1-8B",
messages=messages
)

print(json.dumps(
Expand Down
2 changes: 1 addition & 1 deletion docs/source/requirements.rst
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
Requirements
=================

To access the API, contact us `here <https://mailchi.mp/predictionguard/getting-started>`_ to get an enterprise access token. You will need this access token to continue.
To access the API, you will need an API Key. Contact us `here <https://predictionguard.com/get-started>`_ to get started.
90 changes: 90 additions & 0 deletions examples/audio.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
{
"cells": [
{
"metadata": {},
"cell_type": "markdown",
"source": "## Transcribing Audio with Prediction Guard",
"id": "53b2be3dbc44dbf2"
},
{
"metadata": {},
"cell_type": "markdown",
"source": "### Setup",
"id": "ea9357a03d7869da"
},
{
"cell_type": "code",
"execution_count": null,
"id": "initial_id",
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Import necessary packages\n",
"import os\n",
"import json\n",
"\n",
"from predictionguard import PredictionGuard\n",
"\n",
"\n",
"# Set your Prediction Guard token and url as an environment variable.\n",
"os.environ[\"PREDICTIONGUARD_API_KEY\"] = \"<api key>\"\n",
"os.environ[\"PREDICTIONGUARD_URL\"] = \"<url>\"\n",
"\n",
"# Or set your Prediction Guard token and url when initializing the PredictionGuard class.\n",
"client = PredictionGuard(\n",
" api_key=\"<api_key>\",\n",
" url=\"<url>\"\n",
")"
]
},
{
"metadata": {},
"cell_type": "markdown",
"source": "### Transcribe Audio",
"id": "65ffcefb7e8c4f73"
},
{
"metadata": {},
"cell_type": "code",
"outputs": [],
"execution_count": null,
"source": [
"response = client.audio.transcriptions.create(\n",
" model=\"base\",\n",
" file=\"sample_audio.wav\"\n",
")\n",
"\n",
"print(json.dumps(\n",
" response,\n",
" sort_keys=True,\n",
" indent=4,\n",
" separators=(\",\", \": \")\n",
"))"
],
"id": "1b6769e1b2e6bd6b"
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
22 changes: 13 additions & 9 deletions examples/chat.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -27,11 +27,15 @@
"from predictionguard import PredictionGuard\n",
"\n",
"\n",
"# Set PG API Key\n",
"# Set your Prediction Guard token and url as an environment variable.\n",
"os.environ[\"PREDICTIONGUARD_API_KEY\"] = \"<api key>\"\n",
"os.environ[\"PREDICTIONGUARD_URL\"] = \"<url>\"\n",
"\n",
"# Initialize PG client\n",
"client = PredictionGuard()"
"# Or set your Prediction Guard token and url when initializing the PredictionGuard class.\n",
"client = PredictionGuard(\n",
" api_key=\"<api_key>\",\n",
" url=\"<url>\"\n",
")"
]
},
{
Expand Down Expand Up @@ -59,9 +63,9 @@
"]\n",
"\n",
"chat_response = client.chat.completions.create(\n",
" model=\"Hermes-2-Pro-Mistral-7B\",\n",
" model=\"Hermes-3-Llama-3.1-8B\",\n",
" messages=messages,\n",
" max_tokens=500,\n",
" max_completion_tokens=500,\n",
" temperature=1.0,\n",
" top_p=1.0,\n",
" top_k=50\n",
Expand Down Expand Up @@ -95,14 +99,14 @@
" },\n",
" {\n",
" \"role\": \"user\",\n",
" \"content\": \"Write me a childrens story about an elf warrior.\"\n",
" \"content\": \"Write me a children's story about an elf warrior.\"\n",
" }\n",
"]\n",
"\n",
"for res in client.chat.completions.create(\n",
" model=\"Hermes-2-Pro-Mistral-7B\",\n",
" model=\"Hermes-3-Llama-3.1-8B\",\n",
" messages=messages,\n",
" max_tokens=100,\n",
" max_completion_tokens=100,\n",
" stream=True\n",
"):\n",
" # Use 'end' parameter in print function to avoid new lines.\n",
Expand Down Expand Up @@ -143,7 +147,7 @@
"]\n",
"\n",
"vision_response = client.chat.completions.create(\n",
" model=\"llava-1.5-7b-hf\",\n",
" model=\"Qwen2.5-VL-7B-Instruct\",\n",
" messages=messages\n",
")\n",
"\n",
Expand Down
60 changes: 42 additions & 18 deletions examples/completions.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -10,15 +10,11 @@
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Setup"
]
"source": "### Setup"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Import necessary packages\n",
"import os\n",
Expand All @@ -27,12 +23,19 @@
"from predictionguard import PredictionGuard\n",
"\n",
"\n",
"# Set PG API Key\n",
"\n",
"# Set your Prediction Guard token and url as an environment variable.\n",
"os.environ[\"PREDICTIONGUARD_API_KEY\"] = \"<api key>\"\n",
"os.environ[\"PREDICTIONGUARD_URL\"] = \"<url>\"\n",
"\n",
"# Initialize PG client\n",
"client = PredictionGuard()"
]
"# Or set your Prediction Guard token and url when initializing the PredictionGuard class.\n",
"client = PredictionGuard(\n",
"    api_key=\"<api_key>\",\n",
" url=\"<url>\"\n",
")"
],
"outputs": [],
"execution_count": null
},
{
"cell_type": "markdown",
Expand All @@ -43,14 +46,12 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"response = client.completions.create(\n",
" model=\"Hermes-2-Pro-Mistral-7B\",\n",
" messages=\"Tell me a joke.\",\n",
" max_tokens=500\n",
" model=\"Hermes-3-Llama-3.1-8B\",\n",
" prompt=\"Tell me a joke.\",\n",
" max_tokens=100\n",
")\n",
"\n",
"print(json.dumps(\n",
Expand All @@ -59,7 +60,30 @@
" indent=4,\n",
" separators=(',', ': ')\n",
"))"
]
],
"outputs": [],
"execution_count": null
},
{
"metadata": {},
"cell_type": "markdown",
"source": "### Streaming Completions"
},
{
"metadata": {},
"cell_type": "code",
"source": [
"for res in client.completions.create(\n",
" model=\"Hermes-3-Llama-3.1-8B\",\n",
" prompt=\"Tell me a joke.\",\n",
" max_tokens=100,\n",
" stream=True\n",
"):\n",
" # Use 'end' parameter in print function to avoid new lines.\n",
" print(res[\"data\"][\"choices\"][0][\"text\"], end=\"\")"
],
"outputs": [],
"execution_count": null
},
{
"cell_type": "markdown",
Expand All @@ -70,14 +94,14 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model_list = client.completions.list_models()\n",
"\n",
"print(model_list)"
]
],
"outputs": [],
"execution_count": null
}
],
"metadata": {
Expand Down
Loading
Loading