Add openai integration
This commit is contained in:
parent
9cf1a287a7
commit
70cf0641d4
10
README.md
10
README.md
@@ -20,6 +20,16 @@ You can start editing the page by modifying `app/page.tsx`. The page auto-update
|
||||
|
||||
This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
Create a `.env.local` file in the root directory with the following variables:
|
||||
|
||||
```
|
||||
OPENAI_API_KEY=your_openai_api_key
|
||||
```
|
||||
|
||||
You will need an OpenAI API key to use the AI features.
|
||||
|
||||
## Learn More
|
||||
|
||||
To learn more about Next.js, take a look at the following resources:
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import type { NextConfig } from "next";
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
/* config options here */
|
||||
env: {
|
||||
OPENAI_API_KEY: process.env.OPENAI_API_KEY,
|
||||
},
|
||||
};
|
||||
|
||||
export default nextConfig;
|
||||
|
||||
792
package-lock.json
generated
792
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -9,9 +9,12 @@
|
||||
"lint": "next lint"
|
||||
},
|
||||
"dependencies": {
|
||||
"@dotenvx/dotenvx": "^1.44.1",
|
||||
"@heroicons/react": "^2.2.0",
|
||||
"dotenv": "^16.5.0",
|
||||
"mongodb": "^6.16.0",
|
||||
"next": "15.3.2",
|
||||
"openai": "^4.100.0",
|
||||
"react": "^19.0.0",
|
||||
"react-dom": "^19.0.0"
|
||||
},
|
||||
@@ -22,6 +25,8 @@
|
||||
"@types/react-dom": "^19",
|
||||
"eslint": "^9",
|
||||
"eslint-config-next": "15.3.2",
|
||||
"ts-node": "^10.9.2",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"typescript": "^5"
|
||||
}
|
||||
}
|
||||
|
||||
116
scripts/openai-examples.js
Normal file
116
scripts/openai-examples.js
Normal file
@@ -0,0 +1,116 @@
|
||||
// Load .env into process.env before anything reads configuration.
require('dotenv').config();

const { OpenAI } = require('openai');

// Shared OpenAI client used by every example below. Falls back to a
// placeholder key so the script still loads without configuration;
// runExamples() warns about that case before any API call is made.
const apiKey = process.env.OPENAI_API_KEY || 'sk-your-api-key-here';
const client = new OpenAI({ apiKey });
|
||||
|
||||
// Demonstrates the legacy text-completions endpoint.
async function exampleCompletion() {
  console.log('Running Completion Example...');

  const request = {
    model: 'gpt-3.5-turbo-instruct',
    prompt: 'Tell me about electric vehicles in 50 words or less.',
    max_tokens: 100,
  };

  try {
    const response = await client.completions.create(request);
    console.log('\nCompletion Response:');
    console.log(response.choices[0].text.trim());
  } catch (error) {
    // Best-effort demo: report and continue with the next example.
    console.error('Error in completion example:', error.message);
  }
}
|
||||
|
||||
// Demonstrates the chat-completions endpoint with a system + user message.
async function exampleChatCompletion() {
  console.log('\nRunning Chat Completion Example...');

  const messages = [
    { role: 'system', content: 'You are a helpful assistant that specializes in electric vehicles.' },
    { role: 'user', content: 'Tell me about electric vehicles in 50 words or less.' }
  ];

  try {
    const chatCompletion = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages,
      max_tokens: 100,
    });

    console.log('\nChat Completion Response:');
    console.log(chatCompletion.choices[0].message.content.trim());
  } catch (error) {
    // Best-effort demo: report and continue with the next example.
    console.error('Error in chat completion example:', error.message);
  }
}
|
||||
|
||||
// Demonstrates token-by-token streaming of a chat completion.
async function exampleStreamingCompletion() {
  console.log('\nRunning Streaming Completion Example...');

  const messages = [
    { role: 'system', content: 'You are a helpful assistant that specializes in electric vehicles.' },
    { role: 'user', content: 'List 5 benefits of electric vehicles.' }
  ];

  try {
    const stream = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages,
      stream: true,
    });

    console.log('\nStreaming Response:');
    for await (const chunk of stream) {
      // Each chunk carries an incremental delta; write it without a newline.
      const token = chunk.choices[0]?.delta?.content || '';
      process.stdout.write(token);
    }
    console.log('\n\n--- Stream complete ---');
  } catch (error) {
    // Best-effort demo: report and continue with the next example.
    console.error('Error in streaming example:', error.message);
  }
}
|
||||
|
||||
// Demonstrates generating an embedding vector for a short text.
async function exampleEmbedding() {
  console.log('\nRunning Embedding Example...');

  const text = 'Electric vehicles are powered by electricity stored in batteries.';

  try {
    const result = await client.embeddings.create({
      model: 'text-embedding-ada-002',
      input: text,
    });

    const vector = result.data[0].embedding;
    console.log('\nEmbedding Response:');
    console.log('First 5 dimensions:', vector.slice(0, 5));
    console.log(`Total dimensions: ${vector.length}`);
  } catch (error) {
    // Best-effort demo: report and continue.
    console.error('Error in embedding example:', error.message);
  }
}
|
||||
|
||||
// Entry point: runs every example above, one after another.
async function runExamples() {
  console.log('=== OPENAI API EXAMPLES ===\n');

  try {
    // Warn (but keep going) when no usable API key is configured;
    // the placeholder fallback set at module load counts as "no key".
    const key = process.env.OPENAI_API_KEY;
    if (!key || key === 'sk-your-api-key-here') {
      console.warn('\n⚠️ WARNING: No valid OpenAI API key found in environment variables.');
      console.warn(' Some examples may fail. Set the OPENAI_API_KEY environment variable to run these examples.');
      console.warn(' Example: OPENAI_API_KEY=sk-your-key npm run openai-examples\n');
    }

    // Run sequentially so the console output stays grouped per example.
    await exampleCompletion();
    await exampleChatCompletion();
    await exampleStreamingCompletion();
    await exampleEmbedding();

    console.log('\n=== ALL EXAMPLES COMPLETE ===');
  } catch (error) {
    // Individual examples swallow their own errors; anything reaching
    // here is unexpected, so fail the script.
    console.error('Error running examples:', error);
    process.exit(1);
  }
}

// Kick everything off when the script is executed.
runExamples();
|
||||
25
src/app/api/cars/assistant/route.ts
Normal file
25
src/app/api/cars/assistant/route.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import { NextResponse } from 'next/server';
|
||||
import { getCarRecommendation } from '@/backend/services/examples/carTools';
|
||||
|
||||
export async function POST(request: Request) {
|
||||
try {
|
||||
const { question } = await request.json();
|
||||
|
||||
if (!question || typeof question !== 'string') {
|
||||
return NextResponse.json(
|
||||
{ error: 'Missing or invalid question parameter' },
|
||||
{ status: 400 }
|
||||
);
|
||||
}
|
||||
|
||||
const response = await getCarRecommendation(question);
|
||||
|
||||
return NextResponse.json({ response });
|
||||
} catch (error) {
|
||||
console.error('Error in assistant API:', error);
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to process the question' },
|
||||
{ status: 500 }
|
||||
);
|
||||
}
|
||||
}
|
||||
6
src/backend/services/index.ts
Normal file
6
src/backend/services/index.ts
Normal file
@@ -0,0 +1,6 @@
|
||||
// Re-export OpenAI service functions
|
||||
export {
|
||||
default as openai,
|
||||
getChatCompletion,
|
||||
getChatCompletionWithTools,
|
||||
} from './openai';
|
||||
154
src/backend/services/openai.ts
Normal file
154
src/backend/services/openai.ts
Normal file
@@ -0,0 +1,154 @@
|
||||
import { OpenAI } from 'openai';
|
||||
|
||||
// Initialize OpenAI client with API key from environment variable
|
||||
const openai = new OpenAI({
|
||||
apiKey: process.env.OPENAI_API_KEY,
|
||||
maxRetries: 2,
|
||||
timeout: 30 * 1000, // 30 seconds timeout
|
||||
});
|
||||
|
||||
// A single conversation turn sent to the Chat Completions API.
// NOTE(review): this local type models only the basic roles; assistant
// messages with tool_calls and 'tool' results are not representable here.
type ChatCompletionMessageParam = {
  role: 'user' | 'assistant' | 'system';
  content: string;
};

/**
 * JSON-schema style description of a callable function, as expected by the
 * OpenAI tools API (`parameters` follows JSON Schema conventions).
 */
export type FunctionDefinition = {
  name: string;
  description: string;
  parameters: {
    type: string;
    properties: Record<string, unknown>;
    required?: string[];
  };
};

/** A tool entry for the `tools` array; currently only functions exist. */
export type ToolDefinition = {
  type: 'function';
  function: FunctionDefinition;
};

/**
 * Executes a tool call requested by the model.
 * Receives the function name and its JSON-parsed arguments; may return a
 * plain string or an object (objects are JSON-stringified by the caller).
 */
export type ToolCallHandler = (
  name: string,
  args: Record<string, unknown>
) => Promise<string | Record<string, unknown>>;
|
||||
|
||||
/**
|
||||
* Get completion from OpenAI using Chat Completions API (standard method)
|
||||
* @param prompt The prompt to send to OpenAI
|
||||
* @returns The completion text
|
||||
*/
|
||||
export async function getChatCompletion(messages: ChatCompletionMessageParam[]): Promise<string> {
|
||||
try {
|
||||
const completion = await openai.chat.completions.create({
|
||||
model: 'gpt-4o-mini',
|
||||
messages: messages,
|
||||
temperature: 0.1,
|
||||
});
|
||||
|
||||
return completion.choices[0]?.message?.content || '';
|
||||
} catch (error) {
|
||||
if (error instanceof OpenAI.APIError) {
|
||||
console.error('OpenAI API Error:', {
|
||||
status: error.status,
|
||||
name: error.name,
|
||||
message: error.message,
|
||||
request_id: error.request_id,
|
||||
});
|
||||
} else {
|
||||
console.error('Error calling OpenAI:', error);
|
||||
}
|
||||
throw new Error('Failed to get chat completion from OpenAI');
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Get a completion from OpenAI with tool-calling capabilities.
 *
 * Flow: (1) send the conversation plus tool definitions; (2) if the model
 * requests tool calls, execute each one via `toolHandler` (in parallel);
 * (3) send the tool results back and return the model's final answer.
 *
 * @param messages The messages to send to OpenAI
 * @param tools The tools that the model can use
 * @param toolHandler Function to handle tool calls
 * @returns The final completion after tool processing ('' when empty)
 * @throws Error when either OpenAI request fails (details are logged first)
 */
export async function getChatCompletionWithTools(
  messages: ChatCompletionMessageParam[],
  tools: ToolDefinition[],
  toolHandler: ToolCallHandler
): Promise<string> {
  try {
    // Initial request; tool_choice 'auto' lets the model decide whether
    // to answer directly or request tool calls.
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages,
      tools,
      temperature: 0.1,
      tool_choice: 'auto',
    });

    const message = completion.choices[0]?.message;

    // No tool calls requested: return the direct answer.
    if (!message?.tool_calls?.length) {
      return message?.content || '';
    }

    // Execute every requested tool call concurrently.
    const toolCalls = message.tool_calls;
    const toolResults = await Promise.all(
      toolCalls.map(async (toolCall) => {
        const functionCall = toolCall.function;
        const name = functionCall.name;
        // NOTE(review): JSON.parse is outside the inner try, so malformed
        // arguments abort the whole call via the outer catch.
        const args = JSON.parse(functionCall.arguments);

        try {
          // Execute the tool
          const result = await toolHandler(name, args);

          // Build the 'tool' message the follow-up request expects;
          // content must be a string, so objects are serialized.
          return {
            tool_call_id: toolCall.id,
            role: 'tool' as const,
            name,
            content: typeof result === 'string' ? result : JSON.stringify(result),
          };
        } catch (error) {
          // A failing tool is reported back to the model rather than
          // aborting, so it can explain the failure in its final answer.
          console.error(`Error executing tool ${name}:`, error);
          return {
            tool_call_id: toolCall.id,
            role: 'tool' as const,
            name,
            content: JSON.stringify({ error: `Failed to execute tool ${name}` }),
          };
        }
      })
    );

    // Append the assistant's tool-call message and all tool results, in
    // order, to the original conversation for the follow-up request.
    // NOTE(review): these roles aren't representable in the local
    // ChatCompletionMessageParam type; relies on structural compatibility
    // with the SDK's accepted message types.
    const updatedMessages = [
      ...messages,
      message,
      ...toolResults,
    ];

    // Second request with tool results
    const secondCompletion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: updatedMessages,
      temperature: 0.1,
    });

    return secondCompletion.choices[0]?.message?.content || '';
  } catch (error) {
    if (error instanceof OpenAI.APIError) {
      // Surface the structured API error fields for debugging.
      console.error('OpenAI API Error:', {
        status: error.status,
        name: error.name,
        message: error.message,
        request_id: error.request_id,
      });
    } else {
      console.error('Error calling OpenAI with tools:', error);
    }
    // Re-throw a stable message so callers don't depend on SDK error shapes.
    throw new Error('Failed to get chat completion with tools from OpenAI');
  }
}
|
||||
|
||||
// Default export: the configured OpenAI client instance.
export default openai;
|
||||
Loading…
x
Reference in New Issue
Block a user