'use client';

import { faker } from '@faker-js/faker';
import { streamText } from 'ai';
import { useChat as useBaseChat } from 'ai/react';
import { createOllama } from 'ollama-ai-provider';

import { useSettings } from '@/components/editor/settings';

export const useChat = () => {
  const { model } = useSettings();

  return useBaseChat({
    id: 'editor',
    api: '/api/ai/command',
    body: {
      model: model.value,
    },
    fetch: async (input, init) => {
      try {
        // First, try the normal API endpoint.
        const res = await fetch(input, init);

        if (res.ok) return res;

        // If the API endpoint fails, fall back to a direct Ollama call.
        // Note: JSON.parse is synchronous, so no `await` is needed here.
        const { messages } = JSON.parse((init?.body as string) ?? '{}');

        const ollama = createOllama({ baseURL: 'http://localhost:11434/api' });

        const result = await streamText({
          maxTokens: 2048,
          messages,
          model: ollama(model.value || 'phi3'),
          temperature: 0.7,
        });

        return result.toDataStreamResponse({
          headers: {
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'text/event-stream',
          },
        });
      } catch (error) {
        console.error('Chat error:', error);
        throw error;
      }
    },
  });
};

// Used for testing. Remove it after implementing the useChat API.
const fakeStreamText = ({
  chunkCount = 10,
  streamProtocol = 'data',
}: {
  chunkCount?: number;
  streamProtocol?: 'data' | 'text';
} = {}) => {
  // Generate random word chunks with random delays; the trailing space keeps
  // consecutive chunks from running together when concatenated.
  const chunks = Array.from({ length: chunkCount }, () => ({
    delay: faker.number.int({ max: 150, min: 50 }),
    texts: faker.lorem.words({ max: 3, min: 1 }) + ' ',
  }));
  const encoder = new TextEncoder();

  return new ReadableStream({
    async start(controller) {
      for (const chunk of chunks) {
        await new Promise((resolve) => setTimeout(resolve, chunk.delay));

        if (streamProtocol === 'text') {
          controller.enqueue(encoder.encode(chunk.texts));
        } else {
          // Data stream protocol: `0:` frames carry JSON-encoded text parts.
          controller.enqueue(
            encoder.encode(`0:${JSON.stringify(chunk.texts)}\n`)
          );
        }
      }

      if (streamProtocol === 'data') {
        // The `d:` finish frame must also be byte-encoded, like the chunks above.
        controller.enqueue(
          encoder.encode(
            `d:{"finishReason":"stop","usage":{"promptTokens":0,"completionTokens":${chunks.length}}}\n`
          )
        );
      }

      controller.close();
    },
  });
};
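
// A minimal sketch (not wired up above) of how `fakeStreamText` could stand in
// for the API route during testing: return its stream from the `fetch` override
// instead of calling Ollama. The 400 ms delay and the exact headers are
// assumptions for illustration, not requirements of the hook.
//
// fetch: async (input, init) => {
//   const res = await fetch(input, init);
//
//   if (res.ok) return res;
//
//   // Simulate network latency, then stream the fake chunks back.
//   await new Promise((resolve) => setTimeout(resolve, 400));
//
//   return new Response(fakeStreamText(), {
//     headers: {
//       'Connection': 'keep-alive',
//       'Content-Type': 'text/plain',
//     },
//   });
// },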