Commit 152e2ce

feat: Kiro OpenAI integration
1 parent 2c74415 commit 152e2ce

File tree

2 files changed: +37, -12 lines


.kiro/specs/api-integrations/tasks.md

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 - Add basic validation and error handling to all existing providers
 - _Requirements: 2.1, 2.2, 3.1, 3.2_

-- [ ] 2. Implement direct OpenAI API integration
+- [x] 2. Implement direct OpenAI API integration
 - Replace proxy API call with direct OpenAI API call to https://api.openai.com/v1/chat/completions
 - Add proper request formatting for OpenAI's chat completions API
 - Implement OpenAI-specific error handling (rate limits, authentication, etc.)
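
For reference, the request and response shapes that the new provider code relies on look roughly like the sketch below. The interface names are illustrative only (they do not appear in this repository); the field names follow OpenAI's documented chat completions API.

```ts
// Illustrative shapes for the /v1/chat/completions exchange.
// Interface names are hypothetical; field names follow OpenAI's
// documented chat completions request/response format.

interface ChatCompletionRequest {
  model: string;                      // e.g. "gpt-3.5-turbo"
  messages: { role: "system" | "user" | "assistant"; content: string }[];
  max_tokens?: number;                // upper bound on generated tokens
  temperature?: number;               // sampling temperature
}

interface ChatCompletionResponse {
  choices: { message: { role: string; content: string } }[];
}

// Error responses arrive as { error: { message, type, code } },
// which is what errorData.error?.message reads in the diff below.
interface ChatCompletionErrorBody {
  error?: { message: string; type?: string; code?: string | null };
}
```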

lib/ai-providers.ts

Lines changed: 36 additions & 11 deletions
@@ -108,30 +108,55 @@ export class OpenAIProvider implements AIProvider {
   ): Promise<string> {
     this.validateRequest(question, correctAnswers, apiKey);
 
+    const prompt = `Question: ${question}\n\nCorrect answers: ${correctAnswers.join(
+      ", ",
+    )}\n\nPlease provide a clear and concise explanation of why these answers are correct. Focus on the key concepts and reasoning.`;
+
     try {
-      const response = await fetch("/api/ai/openai", {
+      const response = await fetch("https://api.openai.com/v1/chat/completions", {
         method: "POST",
-        headers: { "Content-Type": "application/json" },
+        headers: {
+          "Authorization": `Bearer ${apiKey}`,
+          "Content-Type": "application/json",
+        },
         body: JSON.stringify({
-          question,
-          correctAnswers,
-          apiKey,
+          model: "gpt-3.5-turbo",
+          messages: [
+            {
+              role: "user",
+              content: prompt,
+            },
+          ],
+          max_tokens: 500,
+          temperature: 0.7,
         }),
       });
 
       if (!response.ok) {
-        const errorType = response.status === 401 ? 'auth' :
-          response.status === 429 ? 'rate_limit' : 'network';
-        throw new AIProviderError(this.name, errorType, `OpenAI API error: ${response.status}`);
+        const errorData = await response.json().catch(() => ({}));
+        const errorMessage = errorData.error?.message || `HTTP ${response.status}`;
+
+        // OpenAI-specific error handling
+        if (response.status === 401) {
+          throw new AIProviderError(this.name, 'auth', `Authentication failed: ${errorMessage}`);
+        } else if (response.status === 429) {
+          throw new AIProviderError(this.name, 'rate_limit', `Rate limit exceeded: ${errorMessage}`);
+        } else if (response.status === 400) {
+          throw new AIProviderError(this.name, 'validation', `Invalid request: ${errorMessage}`);
+        } else if (response.status >= 500) {
+          throw new AIProviderError(this.name, 'network', `OpenAI server error: ${errorMessage}`);
+        } else {
+          throw new AIProviderError(this.name, 'network', `OpenAI API error: ${errorMessage}`);
+        }
       }
 
       const data = await response.json();
 
-      if (!data.explanation) {
-        throw new AIProviderError(this.name, 'validation', 'Invalid response from OpenAI');
+      if (!data.choices?.[0]?.message?.content) {
+        throw new AIProviderError(this.name, 'validation', 'Invalid response structure from OpenAI');
       }
 
-      return data.explanation;
+      return data.choices[0].message.content;
     } catch (error) {
       if (error instanceof AIProviderError) throw error;
       const errorMessage = error instanceof Error ? error.message : 'Unknown error';
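
The diff above references `AIProviderError`, `this.validateRequest`, and the `AIProvider` interface, none of which are part of this commit. The sketch below shows how the updated provider might be called; the declarations and the `explainAnswer` method name are assumptions for illustration, not code from the repository.

```ts
// Minimal usage sketch. AIProviderError, AIProvider, and the method name
// explainAnswer are assumed here for illustration; the real declarations
// live elsewhere in lib/ai-providers.ts and may differ.

type AIErrorType = "auth" | "rate_limit" | "validation" | "network";

class AIProviderError extends Error {
  constructor(
    public provider: string,
    public type: AIErrorType,
    message: string,
  ) {
    super(message);
  }
}

interface AIProvider {
  name: string;
  explainAnswer(
    question: string,
    correctAnswers: string[],
    apiKey: string,
  ): Promise<string>;
}

async function demo(provider: AIProvider): Promise<void> {
  try {
    const explanation = await provider.explainAnswer(
      "Which HTTP status code indicates rate limiting?",
      ["429"],
      process.env.OPENAI_API_KEY ?? "",
    );
    console.log(explanation);
  } catch (err) {
    // The provider maps OpenAI failures onto typed AIProviderError values,
    // so callers can branch on err.type (auth, rate_limit, validation, network).
    if (err instanceof AIProviderError) {
      console.error(`[${err.provider}] ${err.type}: ${err.message}`);
    } else {
      throw err;
    }
  }
}
```

One consequence of the change worth noting: the API key is now sent straight from the provider to api.openai.com in the Authorization header, rather than being forwarded through the app's /api/ai/openai proxy route.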
