Commit 51d6c6c

Committed Apr 18, 2024
switch back to gemini
1 parent: 4403f76 · commit: 51d6c6c

File tree

1 file changed: +22 -34 lines

  • apps/cf-ai-backend/src/routes/chat.ts

apps/cf-ai-backend/src/routes/chat.ts (+22 -34)
@@ -114,42 +114,30 @@ export async function POST(request: Request, _: CloudflareVectorizeStore, embedd
 	},
 ] as Content[];
 
-	// const chat = model.startChat({
-	// 	history: [...defaultHistory, ...(body.chatHistory ?? [])],
-	// });
+	const chat = model.startChat({
+		history: [...defaultHistory, ...(body.chatHistory ?? [])],
+	});
 
 	const prompt =
-		`You are supermemory - an agent that answers a question based on the context provided. don't say 'based on the context'. Be very concise and to the point. Give short responses. I expect you to be like a 'Second Brain'. you will be provided with the context (old saved posts) and questions. Answer accordingly. Answer in markdown format` +
+		`You are supermemory - an agent that answers a question based on the context provided. don't say 'based on the context'. Be very concise and to the point. Give short responses. I expect you to be like a 'Second Brain'. you will be provided with the context (old saved posts) and questions. Answer accordingly. Answer in markdown format. Use bold, italics, bullet points` +
 		`Context:\n${preparedContext == '' ? "No context, just introduce yourself and say something like 'I don't know, but you can save things from the sidebar on the right and then query me'" : preparedContext + `Question: ${query}\nAnswer:`}\n\n`;
 
-	// const output = await chat.sendMessageStream(prompt);
-
-	// const response = new Response(
-	// 	new ReadableStream({
-	// 		async start(controller) {
-	// 			const converter = new TextEncoder();
-	// 			for await (const chunk of output.stream) {
-	// 				const chunkText = await chunk.text();
-	// 				const encodedChunk = converter.encode('data: ' + JSON.stringify({ response: chunkText }) + '\n\n');
-	// 				controller.enqueue(encodedChunk);
-	// 			}
-	// 			const doneChunk = converter.encode('data: [DONE]');
-	// 			controller.enqueue(doneChunk);
-	// 			controller.close();
-	// 		},
-	// 	}),
-	// );
-	// return response;
-	const ai = new Ai(env?.AI);
-	// @ts-ignore
-	const output: AiTextGenerationOutput = (await ai.run('@hf/mistralai/mistral-7b-instruct-v0.2', {
-		prompt: prompt.slice(0, 6144),
-		stream: true,
-	})) as ReadableStream;
-
-	return new Response(output, {
-		headers: {
-			'content-type': 'text/event-stream',
-		},
-	});
+	const output = await chat.sendMessageStream(prompt);
+
+	const response = new Response(
+		new ReadableStream({
+			async start(controller) {
+				const converter = new TextEncoder();
+				for await (const chunk of output.stream) {
+					const chunkText = await chunk.text();
+					const encodedChunk = converter.encode('data: ' + JSON.stringify({ response: chunkText }) + '\n\n');
+					controller.enqueue(encodedChunk);
+				}
+				const doneChunk = converter.encode('data: [DONE]');
+				controller.enqueue(doneChunk);
+				controller.close();
+			},
+		}),
+	);
+	return response;
 }
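For reference, here is a minimal sketch of how a client might consume the server-sent event stream this handler now emits: chunks framed as 'data: ' + JSON.stringify({ response: chunkText }) + '\n\n', terminated by a 'data: [DONE]' sentinel. The '/chat' endpoint path and the request body shape are assumptions for illustration, not part of this commit, which only shows the handler side.

async function readChatStream(query: string): Promise<string> {
	// Assumed endpoint and body shape (hypothetical); the commit only shows the server.
	const res = await fetch('/chat', {
		method: 'POST',
		headers: { 'content-type': 'application/json' },
		body: JSON.stringify({ query }),
	});
	if (!res.body) throw new Error('empty response body');

	const reader = res.body.getReader();
	const decoder = new TextDecoder();
	let buffer = '';
	let answer = '';

	while (true) {
		const { done, value } = await reader.read();
		if (done) break;
		buffer += decoder.decode(value, { stream: true });

		// Each complete SSE event from the handler ends with a blank line ("\n\n").
		const events = buffer.split('\n\n');
		buffer = events.pop() ?? '';
		for (const event of events) {
			if (!event.startsWith('data: ')) continue;
			const payload = event.slice('data: '.length);
			if (payload === '[DONE]') return answer;
			// Payload shape matches the handler: { response: chunkText }
			answer += JSON.parse(payload).response;
		}
	}
	// The final 'data: [DONE]' is enqueued without a trailing blank line, so it
	// typically remains in the leftover buffer; the stream closing ends the loop.
	return answer;
}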
