@@ -31,32 +31,30 @@ npm install openai
Import the OpenAI and Prompt Foundry SDKs

```js
- import PromptFoundry from "@prompt-foundry/typescript-sdk";
- import { Configuration, OpenAIApi } from "openai";
+ import PromptFoundry from '@prompt-foundry/typescript-sdk';
+ import { Configuration, OpenAIApi } from 'openai';

// Initialize Prompt Foundry SDK with your API key
const promptFoundry = new PromptFoundry({
-   apiKey: process.env["PROMPT_FOUNDRY_API_KEY"],
+   apiKey: process.env['PROMPT_FOUNDRY_API_KEY'],
});

// Initialize OpenAI SDK with your API key
const configuration = new Configuration({
-   apiKey: process.env["OPENAI_API_KEY"],
+   apiKey: process.env['OPENAI_API_KEY'],
});
const openai = new OpenAIApi(configuration);

async function main() {
  // Retrieve model parameters for the prompt
-   const modelParameters = await promptFoundry.prompts.getParameters("1212121", {
-     variables: { hello: "world" },
+   const modelParameters = await promptFoundry.prompts.getParameters('1212121', {
+     variables: { hello: 'world' },
  });

  // check if provider is Open AI
-   if (modelParameters.provider === "openai") {
+   if (modelParameters.provider === 'openai') {
    // Use the retrieved parameters to create a chat completion request
-     const modelResponse = await openai.chat.completions.create(
-       modelParameters.parameters
-     );
+     const modelResponse = await openai.chat.completions.create(modelParameters.parameters);

    // Print the response from OpenAI
    console.log(modelResponse.data);
@@ -77,27 +75,27 @@ npm install @anthropic-ai/sdk
Import the Anthropic and Prompt Foundry SDKs

```js
- import PromptFoundry from "@prompt-foundry/typescript-sdk";
- import Anthropic from "@anthropic-ai/sdk";
+ import PromptFoundry from '@prompt-foundry/typescript-sdk';
+ import Anthropic from '@anthropic-ai/sdk';

// Initialize Prompt Foundry SDK with your API key
const promptFoundry = new PromptFoundry({
-   apiKey: process.env["PROMPT_FOUNDRY_API_KEY"],
+   apiKey: process.env['PROMPT_FOUNDRY_API_KEY'],
});

// Initialize Anthropic SDK with your API key
const anthropic = new Anthropic({
-   apiKey: process.env["ANTHROPIC_API_KEY"],
+   apiKey: process.env['ANTHROPIC_API_KEY'],
});

async function main() {
  // Retrieve model parameters for the prompt
-   const modelParameters = await promptFoundry.prompts.getParameters("1212121", {
-     variables: { hello: "world" },
+   const modelParameters = await promptFoundry.prompts.getParameters('1212121', {
+     variables: { hello: 'world' },
  });

  // check if provider is Open AI
-   if (modelParameters.provider === "anthropic") {
+   if (modelParameters.provider === 'anthropic') {
    // Use the retrieved parameters to create a chat completion request
    const message = await anthropic.messages.create(modelParameters.parameters);
@@ -117,7 +115,7 @@ This library includes TypeScript definitions for all request params and response
```ts
import PromptFoundry from '@prompt-foundry/typescript-sdk';

- const promptFoundry = new PromptFoundry({
+ const client = new PromptFoundry({
  apiKey: process.env['PROMPT_FOUNDRY_API_KEY'], // This is the default and can be omitted
});
@@ -177,7 +175,7 @@ You can use the `maxRetries` option to configure or disable this:
<!-- prettier-ignore -->
```js
// Configure the default for all requests:
- const promptFoundry = new PromptFoundry({
+ const client = new PromptFoundry({
  maxRetries: 0, // default is 2
});
@@ -194,7 +192,7 @@ Requests time out after 1 minute by default. You can configure this with a `time
<!-- prettier-ignore -->
```ts
// Configure the default for all requests:
- const promptFoundry = new PromptFoundry({
+ const client = new PromptFoundry({
  timeout: 20 * 1000, // 20 seconds (default is 1 minute)
});
@@ -218,7 +216,7 @@ You can also use the `.withResponse()` method to get the raw `Response` along wi

<!-- prettier-ignore -->
```ts
- const promptFoundry = new PromptFoundry();
+ const client = new PromptFoundry();

const response = await promptFoundry.prompts.getParameters('1212121').asResponse();
console.log(response.headers.get('X-My-Header'));
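The hunk above only shows the `.asResponse()` form. For the `.withResponse()` method named in its header, here is a minimal sketch assuming it resolves to the parsed data together with the raw `Response`; the destructured variable names are illustrative and not taken from the diff:

```ts
import PromptFoundry from '@prompt-foundry/typescript-sdk';

const client = new PromptFoundry();

// Assumed shape: .withResponse() resolves with the parsed result plus the raw Response,
// so headers and status can be inspected alongside the data.
const { data, response } = await client.prompts.getParameters('1212121').withResponse();

console.log(response.headers.get('X-My-Header'));
console.log(data);
```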
@@ -327,7 +325,7 @@ import http from 'http';
import { HttpsProxyAgent } from 'https-proxy-agent';

// Configure the default for all requests:
- const promptFoundry = new PromptFoundry({
+ const client = new PromptFoundry({
  httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
});