import * as vscode from 'vscode';
import * as dotenv from 'dotenv';
// import * as os from 'os';

import { Readability } from '@mozilla/readability';
import { JSDOM } from 'jsdom';

import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'; // add Azure equivalents here to replace OpenAI
// import { AzureAISearchVectorStore } from '@langchain/community/vectorstores/azure_aisearch';

import { HNSWLib } from 'langchain/vectorstores/hnswlib';
import { MemoryVectorStore } from 'langchain/vectorstores/memory';
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { createStuffDocumentsChain } from 'langchain/chains/combine_documents';

import { Document } from '@langchain/core/documents';
import { getEnvironmentAwareColor, getFavoriteColors } from './configuration';
import fetch from 'node-fetch';
import { peacockSmallIcon } from './models';

// import { VectorStore } from '@langchain/core/vectorstores';

// const LANGUAGE_MODEL_ID = 'copilot-gpt-3.5-turbo'; // Use faster model. Alternative is 'copilot-gpt-4', which is slower but more powerful
const peacockDocsUrl = 'https://www.peacockcode.dev/guide';
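// Console-backed telemetry stub; swap in a real TelemetrySender to report events externally.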
const telemetrySender: vscode.TelemetrySender = {
  sendEventData: (eventName: string, data: any) => {
    console.log(`Event: ${eventName}, Data: ${JSON.stringify(data)}`);
  },
  sendErrorData: (error: any, data: any) => {
    console.error(`Error: ${error}, Data: ${JSON.stringify(data)}`);
  },
};

/**
 * Peacock Participant Chat API docs located here:
 * https://code.visualstudio.com/api/extension-guides/chat
 */

dotenv.config(); // or dotenv.config({ path: os.homedir() + '/.env' });

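/**
 * Creates and configures the Peacock chat participant.
 * Presumably invoked once during extension activation, e.g.:
 *
 *   export async function activate(context: vscode.ExtensionContext) {
 *     await participantChatHandler(context);
 *   }
 */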
export async function participantChatHandler(extensionContext: vscode.ExtensionContext) {
  const chatParticipantName = 'vscode-peacock.peacock';
  // create participant
  const peacockTutor = vscode.chat.createChatParticipant(chatParticipantName, chatRequestHandler);

  // add icon to participant
  peacockTutor.iconPath = vscode.Uri.joinPath(extensionContext.extensionUri, peacockSmallIcon);

  telemetrySender.sendEventData('peacockTutor', {
    message: `Created chat participant ${chatParticipantName}`,
  });

  // registerChatVariableResolver parameters:
  //   id: string,
  //   name: string,
  //   userDescription: string,
  //   modelDescription: string | undefined,
  //   isSlow: boolean | undefined,
  //   resolver: ChatVariableResolver,
  //   fullName?: string,
  //   icon?: ThemeIcon,

  // vscode.chat.registerChatVariableResolver(
  //   'peacock',
  //   'peacock-for-vscode',
  //   'peacock-user',
  //   'peacock-docs-model',
  //   true,
  //   {
  //     resolve: async (name, chatVariableContext /* token */) => {
  //       try {
  //         return await getPeacockDocs(chatVariableContext);
  //       } catch (err: any) {
  //         // show a notification with the error
  //         vscode.window.showErrorMessage(err.message);
  //       }
  //     },
  //   },
  // );

  async function chatRequestHandler(
    request: vscode.ChatRequest,
    context: vscode.ChatContext,
    stream: vscode.ChatResponseStream,
    token: vscode.CancellationToken,
  ) {
    try {
      const currentColor = getEnvironmentAwareColor();
      const { values: favoriteColors } = getFavoriteColors();
      const peacockColorList = favoriteColors
        .map(color => `- Name: "${color.name}", Color: ${color.value}`)
        .join('\n ');

      const basePrompt = `
      Your job is to help users choose the color scheme to use with their code editor. Users generally want fun colors that work well and complement each other. Users also want to choose different colors and shades that will help them differentiate instances of the code editor, where each editor has a different color. You will pretend to be a high-society interior designer who has vast experience in color design.

      When prompting and responding, use the Peacock emoji.

      Always answer with the color name and the color HEX code.

      Always offer to apply the color using the Peacock Code extension for the user.

      Provide instructions on how to apply the color.

      If a user asks about the current color, tell them how to find the current color with Peacock and also tell them the current color is ${currentColor}.

      For any questions about the docs, please refer the user to the Peacock Code docs at ${peacockDocsUrl}.

      If the color is in the favorites list ${peacockColorList}, suggest that the user apply the color with the command "Peacock: Change Color to Favorite".

      If the color is not in the favorites list, suggest that the user apply the color with the command "Peacock: Enter Color".

      If the user asks about saving the color to their favorites, suggest that the user save the color with the command "Peacock: Save Color to Favorites".

      If the user does not specify the name of the color, you can choose any color to respond.

      If the user asks a question about multiple colors, provide the best answer to help them choose.

      If the user asks a question that is about the color or Peacock, and not about coding, create a fun response.

      Always be polite and respectful, and do not use any words that could offend or misrepresent the user.

      Do not refer to the user as "Darling" or other terms of endearment.

      If the user asks a non-programming question, politely decline to respond.
      `;

      // const vectorStore = await getPeacockDocsAsVectorStore(basePrompt);
      // // const model = new ChatOpenAI(); // q. do I need to pass openai key here?
      // const model = new ChatOpenAI({ apiKey: getOpenAIKey() });
      // const questionAnsweringPrompt = ChatPromptTemplate.fromMessages([
      //   ['system', "Answer the user's question using only the sources below:\n\n{context}"],
      //   ['human', '{input}'],
      // ]);
      // const ragChain = await createStuffDocumentsChain({
      //   llm: model,
      //   prompt: questionAnsweringPrompt,
      // });
      // const ragChainStream = await ragChain.stream({
      //   input: request.prompt,
      //   context: vectorStore,
      // });
      // for await (const fragment of ragChainStream) {
      //   stream.markdown(fragment);
      // }

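      // Send the persona prompt plus the user's prompt to the chat model selected for this request.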
      const messages = [
        vscode.LanguageModelChatMessage.User(basePrompt),
        vscode.LanguageModelChatMessage.User(request.prompt),
      ];
      const chatResponse = await request.model.sendRequest(messages, {}, token);
      // Stream the response into the chat as it arrives
      for await (const fragment of chatResponse.text) {
        stream.markdown(fragment);
      }
    } catch (err) {
      // Report the failure instead of silently swallowing it
      console.error(err);
      telemetrySender.sendErrorData(err, { handler: 'chatRequestHandler' });
    }
  }
}

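/**
 * Downloads the Peacock docs, splits them into chunks, embeds them into an
 * in-memory vector store, and returns the documents most relevant to the prompt.
 * Currently only referenced by the commented-out RAG path in chatRequestHandler.
 */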
async function getPeacockDocsAsVectorStore(prompt: string) {
  // get the content of the url
  const urlContent = (await downloadWebPage(peacockDocsUrl)) || '';

  // split the text into smaller chunks
  const documents = await splitTextIntoChunks(urlContent);

  // create the vector store
  const vectorStoreRetriever = await createVectorStore(documents);

  // get the relevant parts of the text content based on the user's prompt
  // const docs = await vectorStoreRetriever.getRelevantDocuments(context.prompt); // getRelevantDocuments is deprecated, use invoke instead
  const docs = await vectorStoreRetriever.invoke(prompt);

  return docs;

  // // assemble the relevant parts of the text content into a single string
  // let pageContent = '';
  // docs.forEach(doc => {
  //   pageContent += doc.pageContent;
  // });

  // return [
  //   {
  //     level: vscode.ChatVariableLevel.Full,
  //     value: pageContent,
  //   },
  // ];
}

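/**
 * Fetches a web page and uses Readability to extract its main article text,
 * returning plain text with images, tags, and line breaks stripped.
 * Returns undefined (after showing an error notification) if the fetch fails.
 */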
async function downloadWebPage(url: string) {
  try {
    const response = await fetch(url);
    const html = await response.text();
    const doc = new JSDOM(html, { url });
    const reader = new Readability(doc.window.document);
    const article = reader.parse();

    let content = article?.content ?? '';

    // remove all images
    content = content.replace(/<img[^>]*>/g, '');

    // strip all remaining HTML tags out of the content
    content = content.replace(/<[^>]*>?/gm, '');

    // remove all line breaks
    content = content.replace(/\r?\n|\r/g, '');

    return content;
  } catch (err: any) {
    // show a notification with the error
    vscode.window.showErrorMessage(err.message);
  }
}

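/**
 * Splits raw page text into ~1000-character chunks with 100 characters of
 * overlap, the shape the embedding and retrieval steps below expect.
 */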
async function splitTextIntoChunks(text: string) {
  // split the content into overlapping chunks sized for embedding
  const splitter = new RecursiveCharacterTextSplitter({
    chunkSize: 1000,
    chunkOverlap: 100,
  });

  const documents = await splitter.createDocuments([text]);

  return documents;
}

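/**
 * Embeds the document chunks with OpenAI embeddings, loads them into an
 * in-memory vector store, and returns a retriever over that store.
 * Requires an OpenAI API key (see getOpenAIKey below).
 */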
async function createVectorStore(documents: Document<Record<string, any>>[]) {
  const openAIApiKey = getOpenAIKey();
  if (!openAIApiKey) {
    throw new Error('OpenAI API key is not set. Set it with setOpenAIKey or the OPENAI_API_KEY environment variable.');
  }

  const embeddings = new OpenAIEmbeddings({
    model: 'text-embedding-3-large',
    apiKey: openAIApiKey,
  });
  const vectorStore = new MemoryVectorStore(embeddings);
  await vectorStore.addDocuments(documents);

  // const vectorStore = await HNSWLib.fromDocuments(
  //   documents,
  //   new OpenAIEmbeddings({ apiKey: openAIApiKey }),
  // );

  // Initialize a retriever wrapper around the vector store
  const vectorStoreRetriever = vectorStore.asRetriever();

  return vectorStoreRetriever;
}

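/** Stores the OpenAI API key in the process environment for later lookups. */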
function setOpenAIKey(apiKey: string) {
  process.env.OPENAI_API_KEY = apiKey;
  // dotenv.config({ path: os.homedir() + '/.env' });
}

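/** Reads the OpenAI API key from the environment (loaded via dotenv above). */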
function getOpenAIKey(): string | undefined {
  return process.env.OPENAI_API_KEY;
}