/**
 * AI Service
 *
 * Handles communication with the AI proxy endpoint for chat completions.
 */
|
|
|
|
import {
|
|
NO_BACKEND_MODE,
|
|
API_ENDPOINTS,
|
|
AI_CONFIG,
|
|
buildApiUrl,
|
|
getApiHeaders,
|
|
logApiDebug,
|
|
} from '../config';
|
|
|
|
// =============================================================================
// Type Definitions
// =============================================================================
|
|
|
|
/** A single chat turn exchanged with the AI proxy. */
export interface AIMessage {
  /** Who produced this turn of the conversation. */
  role: 'system' | 'user' | 'assistant';
  /** Plain-text body of the message. */
  content: string;
}
|
|
|
|
/** Request payload posted to the AI proxy endpoint. */
export interface AIRequest {
  /** Conversation messages to complete. */
  messages: AIMessage[];
  /** Optional model identifier; presumably the backend picks a default when omitted — TODO confirm. */
  model?: string;
}
|
|
|
|
/**
 * Completion response returned by the AI proxy.
 * Field names (`choices`, `finish_reason`, `usage`) follow the common
 * OpenAI-style chat-completion shape — verify against the backend contract.
 */
export interface AIResponse {
  /** Unique identifier for this completion. */
  id: string;
  /** Object type tag (e.g. 'chat.completion'). */
  object: string;
  /** Creation time as a Unix timestamp in seconds. */
  created: number;
  /** Model that produced the completion. */
  model: string;
  /** Candidate completions; callers read choices[0] for the primary reply. */
  choices: Array<{
    index: number;
    message: AIMessage;
    finish_reason: string;
  }>;
  /** Token accounting for the request/response pair. */
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}
|
|
|
|
// =============================================================================
// Mock Response Generator
// =============================================================================
|
|
|
|
const createMockResponse = (userMessage: string): AIResponse => {
|
|
return {
|
|
id: `mock-${Date.now()}`,
|
|
object: 'chat.completion',
|
|
created: Math.floor(Date.now() / 1000),
|
|
model: 'mock-model',
|
|
choices: [
|
|
{
|
|
index: 0,
|
|
message: {
|
|
role: 'assistant',
|
|
content: `I received your message: "${userMessage}". This is a mock response since the backend is not connected.`,
|
|
},
|
|
finish_reason: 'stop',
|
|
},
|
|
],
|
|
usage: {
|
|
prompt_tokens: 10,
|
|
completion_tokens: 20,
|
|
total_tokens: 30,
|
|
},
|
|
};
|
|
};
|
|
|
|
// =============================================================================
// AI Service
// =============================================================================
|
|
|
|
export const aiService = {
|
|
/**
|
|
* Send chat messages to the AI proxy
|
|
* @param messages - Array of chat messages
|
|
* @param token - JWT token for authentication
|
|
* @returns AI response
|
|
*/
|
|
async chat(messages: AIMessage[], token?: string): Promise<AIResponse> {
|
|
if (NO_BACKEND_MODE) {
|
|
logApiDebug('AI Chat', 'Using mock mode');
|
|
return new Promise((resolve) => {
|
|
setTimeout(() => {
|
|
const lastUserMessage = messages.filter((m) => m.role === 'user').pop();
|
|
resolve(createMockResponse(lastUserMessage?.content || 'Hello'));
|
|
}, AI_CONFIG.MOCK_RESPONSE_DELAY);
|
|
});
|
|
}
|
|
|
|
const url = buildApiUrl(API_ENDPOINTS.AI.PROXY);
|
|
|
|
logApiDebug('AI Request', {
|
|
url,
|
|
hasToken: !!token,
|
|
messageCount: messages.length,
|
|
});
|
|
|
|
try {
|
|
const response = await fetch(url, {
|
|
method: 'POST',
|
|
headers: getApiHeaders(token),
|
|
body: JSON.stringify({ messages } as AIRequest),
|
|
});
|
|
|
|
logApiDebug('AI Response Status', response.status);
|
|
|
|
if (!response.ok) {
|
|
const errorText = await response.text();
|
|
logApiDebug('AI Error Response', errorText);
|
|
|
|
let errorDetail = 'AI request failed';
|
|
try {
|
|
const errorData = JSON.parse(errorText);
|
|
errorDetail = errorData.detail || errorDetail;
|
|
} catch {
|
|
errorDetail = errorText || errorDetail;
|
|
}
|
|
throw new Error(`${response.status}: ${errorDetail}`);
|
|
}
|
|
|
|
const data = await response.json();
|
|
logApiDebug('AI Success', {
|
|
id: data.id,
|
|
model: data.model,
|
|
choicesCount: data.choices?.length,
|
|
});
|
|
|
|
return data;
|
|
} catch (error) {
|
|
console.error('AI proxy error:', error);
|
|
throw error;
|
|
}
|
|
},
|
|
|
|
/**
|
|
* Simple helper for single message chat
|
|
* @param content - User message content
|
|
* @param token - JWT token for authentication
|
|
* @param systemPrompt - Optional custom system prompt
|
|
* @returns AI response text
|
|
*/
|
|
async sendMessage(content: string, token?: string, systemPrompt?: string): Promise<string> {
|
|
const messages: AIMessage[] = [
|
|
{
|
|
role: 'system',
|
|
content: systemPrompt || AI_CONFIG.DEFAULT_SYSTEM_PROMPT,
|
|
},
|
|
{
|
|
role: 'user',
|
|
content,
|
|
},
|
|
];
|
|
|
|
const response = await this.chat(messages, token);
|
|
return response.choices[0]?.message?.content || 'No response';
|
|
},
|
|
|
|
/**
|
|
* Send a message with an image to AI for analysis
|
|
* @param content - User message content
|
|
* @param imageBase64 - Base64 encoded image data
|
|
* @param token - JWT token for authentication
|
|
* @returns AI response text
|
|
*/
|
|
async sendMessageWithImage(content: string, imageBase64: string, token?: string): Promise<string> {
|
|
if (NO_BACKEND_MODE) {
|
|
logApiDebug('AI Image Analysis', 'Using mock mode');
|
|
return new Promise((resolve) => {
|
|
setTimeout(() => {
|
|
resolve('This is a mock image analysis response. The image appears to show an interesting scene. In production, this would be analyzed by Gemini AI.');
|
|
}, AI_CONFIG.MOCK_RESPONSE_DELAY);
|
|
});
|
|
}
|
|
|
|
const url = buildApiUrl(API_ENDPOINTS.AI.PROXY);
|
|
|
|
logApiDebug('AI Image Request', {
|
|
url,
|
|
hasToken: !!token,
|
|
hasImage: !!imageBase64,
|
|
});
|
|
|
|
// Gemini vision format - using multimodal content
|
|
const messages = [
|
|
{
|
|
role: 'user',
|
|
content: [
|
|
{
|
|
type: 'text',
|
|
text: content,
|
|
},
|
|
{
|
|
type: 'image_url',
|
|
image_url: {
|
|
url: `data:image/jpeg;base64,${imageBase64}`,
|
|
},
|
|
},
|
|
],
|
|
},
|
|
];
|
|
|
|
try {
|
|
const response = await fetch(url, {
|
|
method: 'POST',
|
|
headers: getApiHeaders(token),
|
|
body: JSON.stringify({ messages }),
|
|
});
|
|
|
|
logApiDebug('AI Image Response Status', response.status);
|
|
|
|
if (!response.ok) {
|
|
const errorText = await response.text();
|
|
logApiDebug('AI Image Error Response', errorText);
|
|
|
|
let errorDetail = 'AI image request failed';
|
|
try {
|
|
const errorData = JSON.parse(errorText);
|
|
errorDetail = errorData.detail || errorDetail;
|
|
} catch {
|
|
errorDetail = errorText || errorDetail;
|
|
}
|
|
throw new Error(`${response.status}: ${errorDetail}`);
|
|
}
|
|
|
|
const data = await response.json();
|
|
logApiDebug('AI Image Success', {
|
|
id: data.id,
|
|
model: data.model,
|
|
});
|
|
|
|
return data.choices[0]?.message?.content || 'No response';
|
|
} catch (error) {
|
|
console.error('AI image proxy error:', error);
|
|
throw error;
|
|
}
|
|
},
|
|
};
|