require('dotenv').config();
const { OpenAI } = require('openai');

// Initialize the OpenAI client
// In a real application, the API key should be loaded from environment variables
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY || 'sk-your-api-key-here',
});

// Function to get a completion
async function exampleCompletion() {
  try {
    console.log('Running Completion Example...');

    const completion = await client.completions.create({
      model: 'gpt-3.5-turbo-instruct',
      prompt: 'Tell me about electric vehicles in 50 words or less.',
      max_tokens: 100,
    });

    console.log('\nCompletion Response:');
    console.log(completion.choices[0].text.trim());
  } catch (error) {
    console.error('Error in completion example:', error.message);
  }
}

// Function to get a chat completion
async function exampleChatCompletion() {
  try {
    console.log('\nRunning Chat Completion Example...');

    const chatCompletion = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant that specializes in electric vehicles.' },
        { role: 'user', content: 'Tell me about electric vehicles in 50 words or less.' }
      ],
      max_tokens: 100,
    });

    console.log('\nChat Completion Response:');
    console.log(chatCompletion.choices[0].message.content.trim());
  } catch (error) {
    console.error('Error in chat completion example:', error.message);
  }
}

// Function to demonstrate streaming
async function exampleStreamingCompletion() {
  try {
    console.log('\nRunning Streaming Completion Example...');

    const stream = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant that specializes in electric vehicles.' },
        { role: 'user', content: 'List 5 benefits of electric vehicles.' }
      ],
      stream: true,
    });

    console.log('\nStreaming Response:');
    for await (const chunk of stream) {
      process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
    console.log('\n\n--- Stream complete ---');
  } catch (error) {
    console.error('Error in streaming example:', error.message);
  }
}

// Function to generate embeddings
async function exampleEmbedding() {
  try {
    console.log('\nRunning Embedding Example...');

    const text = 'Electric vehicles are powered by electricity stored in batteries.';
    const embedding = await client.embeddings.create({
      model: 'text-embedding-ada-002',
      input: text,
    });

    console.log('\nEmbedding Response:');
    console.log('First 5 dimensions:', embedding.data[0].embedding.slice(0, 5));
    console.log(`Total dimensions: ${embedding.data[0].embedding.length}`);
  } catch (error) {
    console.error('Error in embedding example:', error.message);
  }
}

// Run all examples
async function runExamples() {
  console.log('=== OPENAI API EXAMPLES ===\n');

  try {
    // Check if API key is properly set
    if (!process.env.OPENAI_API_KEY || process.env.OPENAI_API_KEY === 'sk-your-api-key-here') {
      console.warn('\n⚠️ WARNING: No valid OpenAI API key found in environment variables.');
      console.warn('   Some examples may fail. Set the OPENAI_API_KEY environment variable to run these examples.');
      console.warn('   Example: OPENAI_API_KEY=sk-your-key npm run openai-examples\n');
    }

    await exampleCompletion();
    await exampleChatCompletion();
    await exampleStreamingCompletion();
    await exampleEmbedding();

    console.log('\n=== ALL EXAMPLES COMPLETE ===');
  } catch (error) {
    console.error('Error running examples:', error);
    process.exit(1);
  }
}

// Run the examples
runExamples();
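
/*
 * Usage note (a minimal sketch of the assumed setup, not part of the examples above):
 * the script expects dotenv to find a .env file in the working directory, and the
 * warning message above refers to an npm script named "openai-examples". The filename
 * "openai-examples.js" below is a hypothetical placeholder, not taken from the source.
 *
 *   # .env (assumed contents)
 *   OPENAI_API_KEY=sk-your-key
 *
 *   # package.json (assumed script entry)
 *   "scripts": { "openai-examples": "node openai-examples.js" }
 *
 *   # or run directly, passing the key inline
 *   OPENAI_API_KEY=sk-your-key node openai-examples.js
 */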